Hello Gao Xiang,

This is a semi-automatic email about new static checker warnings.

The patch 97e86a858bc3: "staging: erofs: tidy up decompression
frontend" from Jul 31, 2019, leads to the following Smatch complaint:

    fs/erofs/zdata.c:670 z_erofs_do_read_page()
    error: we previously assumed 'clt->cl' could be null (see line 596)

fs/erofs/zdata.c
   595                  /* didn't get a valid collection previously (very rare) */
   596                  if (!clt->cl)
                            ^^^^^^^^
New NULL check.

   597                          goto restart_now;
   598                  goto hitted;
   599          }
   600  
   601          /* go ahead the next map_blocks */
   602          debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
   603  
   604          if (z_erofs_collector_end(clt))
   605                  fe->backmost = false;
   606  
   607          map->m_la = offset + cur;
   608          map->m_llen = 0;
   609          err = z_erofs_map_blocks_iter(inode, map, 0);
   610          if (unlikely(err))
   611                  goto err_out;
   612  
   613  restart_now:
   614          if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
   615                  goto hitted;
   616  
   617          err = z_erofs_collector_begin(clt, inode, map);
   618          if (unlikely(err))
   619                  goto err_out;
   620  
   621          /* preload all compressed pages (maybe downgrade role if necessary) */
   622          if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
   623                  cache_strategy = DELAYEDALLOC;
   624          else
   625                  cache_strategy = DONTALLOC;
   626  
   627          preload_compressed_pages(clt, MNGD_MAPPING(sbi),
   628                                   cache_strategy, pagepool);
   629  
   630          tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
   631  hitted:
   632          cur = end - min_t(unsigned int, offset + end - map->m_la, end);
   633          if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
   634                  zero_user_segment(page, cur, end);
   635                  goto next_part;
   636          }
   637  
   638          /* let's derive page type */
   639          page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
   640                  (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
   641                          (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
   642                                  Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
   643  
   644          if (cur)
   645                  tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
   646  
   647  retry:
   648          err = z_erofs_attach_page(clt, page, page_type);
   649          /* should allocate an additional staging page for pagevec */
   650          if (err == -EAGAIN) {
   651                  struct page *const newpage =
   652                          __stagingpage_alloc(pagepool, GFP_NOFS);
   653  
   654                  err = z_erofs_attach_page(clt, newpage,
   655                                            Z_EROFS_PAGE_TYPE_EXCLUSIVE);
   656                  if (likely(!err))
   657                          goto retry;
   658          }
   659  
   660          if (unlikely(err))
   661                  goto err_out;
   662  
   663          index = page->index - (map->m_la >> PAGE_SHIFT);
   664  
   665          z_erofs_onlinepage_fixup(page, index, true);
   666  
   667          /* bump up the number of spiltted parts of a page */
   668          ++spiltted;
   669          /* also update nr_pages */
   670          clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
                ^^^^^^^^^^^^^^^^^                  ^^^^^^^^^^^^^^^^^
Unchecked dereferences.

   671  next_part:
   672          /* can be used for verification */
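
The path Smatch complains about is: "clt->cl" is NULL at line 596, we
goto restart_now, z_erofs_collector_begin() succeeds, and then we fall
through to the dereferences at line 670.  Presumably a successful
z_erofs_collector_begin() always leaves "clt->cl" set, in which case
this is a false positive that Smatch just can't see across the function
boundary.  If that invariant does hold, one hypothetical way to make it
visible at the call site would be an assertion (sketch only, not
tested):

	err = z_erofs_collector_begin(clt, inode, map);
	if (unlikely(err))
		goto err_out;

	/*
	 * a successful z_erofs_collector_begin() is assumed to
	 * leave clt->cl set; assert it where checkers can see it
	 */
	DBG_BUGON(!clt->cl);

(DBG_BUGON() compiles away without CONFIG_EROFS_FS_DEBUG, so a real
NULL check would be needed instead if "clt->cl" actually can be NULL
on this path.)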

regards,
dan carpenter
