tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   1e28eed17697bcf343c6743f0028cc3b5dd88bf0
commit: 03c62d886dd6d3dfebf59d385a37245fe667fe90 drm/i915: Make GEM errors non-fatal by default
date:   8 weeks ago
config: x86_64-randconfig-m001-20210316 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

New smatch warnings:
drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c:233 igt_fill_blt_thread() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c:351 igt_copy_blt_thread() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:693 igt_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:834 igt_shared_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/selftests/i915_request.c:216 igt_request_rewind() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/selftests/i915_request.c:351 __igt_breadcrumbs_smoketest() error: 'ce' dereferencing possible ERR_PTR()
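
All six new warnings follow the same pattern: the selftest takes
ce = i915_gem_context_get_engine(ctx, 0), asserts GEM_BUG_ON(IS_ERR(ce)), and
then dereferences ce a few lines later. With GEM errors now non-fatal by
default, smatch presumably no longer treats the assertion as ending the error
path, so the dereference of a possible ERR_PTR() becomes reachable. One way to
make the warning go away (a sketch only, not a tested patch) would be an
explicit check, mirroring the early return already used after
live_context_for_engine():

	ce = i915_gem_context_get_engine(ctx, 0);
	if (IS_ERR(ce))
		return PTR_ERR(ce);	/* explicit check instead of GEM_BUG_ON() */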

Old smatch warnings:
drivers/gpu/drm/i915/gem/i915_gem_object.h:127 __i915_gem_object_lock() error: we previously assumed 'ww' could be null (see line 119)
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c:140 move_obj_to_gpu() warn: maybe use && instead of &
drivers/gpu/drm/i915/gem/i915_gem_context.h:204 i915_gem_context_get_engine() warn: inconsistent indenting
drivers/gpu/drm/i915/gem/i915_gem_context.h:206 i915_gem_context_get_engine() warn: inconsistent indenting
drivers/gpu/drm/i915/gem/i915_gem_context.h:204 i915_gem_context_get_engine() warn: inconsistent indenting
drivers/gpu/drm/i915/gem/i915_gem_context.h:206 i915_gem_context_get_engine() warn: inconsistent indenting
drivers/gpu/drm/i915/gem/i915_gem_object.h:127 __i915_gem_object_lock() error: we previously assumed 'ww' could be null (see line 119)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:709 igt_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:716 igt_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:729 igt_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:847 igt_shared_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:854 igt_shared_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:867 igt_shared_ctx_exec() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/selftests/i915_request.c:230 igt_request_rewind() error: 'ce' dereferencing possible ERR_PTR()
drivers/gpu/drm/i915/gem/i915_gem_context.h:204 i915_gem_context_get_engine() warn: inconsistent indenting
drivers/gpu/drm/i915/gem/i915_gem_context.h:206 i915_gem_context_get_engine() warn: inconsistent indenting
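
The pre-existing move_obj_to_gpu() warning is smatch's usual complaint about a
bitwise '&' joining two boolean-valued operands, along these lines (illustrative
only, not the actual i915 code):

	if (dirty & !coherent)		/* smatch suggests: use && instead of & */
		do_flush();		/* hypothetical helper */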

vim +/ce +233 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c

0e99f939f08fc3 Matthew Auld   2019-10-25  202  
0e99f939f08fc3 Matthew Auld   2019-10-25  203  static int igt_fill_blt_thread(void *arg)
6501aa4e3a4507 Matthew Auld   2019-05-29  204  {
0e99f939f08fc3 Matthew Auld   2019-10-25  205   struct igt_thread_arg *thread = arg;
d61345f342981f Chris Wilson   2020-06-04  206   struct intel_engine_cs *engine = thread->engine;
0e99f939f08fc3 Matthew Auld   2019-10-25  207   struct rnd_state *prng = &thread->prng;
6501aa4e3a4507 Matthew Auld   2019-05-29  208   struct drm_i915_gem_object *obj;
0e99f939f08fc3 Matthew Auld   2019-10-25  209   struct i915_gem_context *ctx;
0e99f939f08fc3 Matthew Auld   2019-10-25  210   struct intel_context *ce;
0e99f939f08fc3 Matthew Auld   2019-10-25  211   unsigned int prio;
6501aa4e3a4507 Matthew Auld   2019-05-29  212   IGT_TIMEOUT(end);
4746fd5c2c1322 Chris Wilson   2020-02-10  213   u64 total, max;
0e99f939f08fc3 Matthew Auld   2019-10-25  214   int err;
6501aa4e3a4507 Matthew Auld   2019-05-29  215  
34485832cb9872 Chris Wilson   2019-11-11  216   ctx = thread->ctx;
34485832cb9872 Chris Wilson   2019-11-11  217   if (!ctx) {
d61345f342981f Chris Wilson   2020-06-04  218           ctx = live_context_for_engine(engine, thread->file);
34485832cb9872 Chris Wilson   2019-11-11  219           if (IS_ERR(ctx))
34485832cb9872 Chris Wilson   2019-11-11  220                   return PTR_ERR(ctx);
0e99f939f08fc3 Matthew Auld   2019-10-25  221  
0e99f939f08fc3 Matthew Auld   2019-10-25  222           prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
0e99f939f08fc3 Matthew Auld   2019-10-25  223           ctx->sched.priority = I915_USER_PRIORITY(prio);
34485832cb9872 Chris Wilson   2019-11-11  224   }
0e99f939f08fc3 Matthew Auld   2019-10-25  225  
d61345f342981f Chris Wilson   2020-06-04  226   ce = i915_gem_context_get_engine(ctx, 0);
0e99f939f08fc3 Matthew Auld   2019-10-25  227   GEM_BUG_ON(IS_ERR(ce));
554e330ceb9f00 Matthew Auld   2019-08-10  228  
4746fd5c2c1322 Chris Wilson   2020-02-10  229   /*
4746fd5c2c1322 Chris Wilson   2020-02-10  230    * If we have a tiny shared address space, like for the GGTT
4746fd5c2c1322 Chris Wilson   2020-02-10  231    * then we can't be too greedy.
4746fd5c2c1322 Chris Wilson   2020-02-10  232    */
4746fd5c2c1322 Chris Wilson   2020-02-10 @233   max = ce->vm->total;
4746fd5c2c1322 Chris Wilson   2020-02-10  234   if (i915_is_ggtt(ce->vm) || thread->ctx)
4746fd5c2c1322 Chris Wilson   2020-02-10  235           max = div_u64(max, thread->n_cpus);
4746fd5c2c1322 Chris Wilson   2020-02-10  236   max >>= 4;
4746fd5c2c1322 Chris Wilson   2020-02-10  237  
4746fd5c2c1322 Chris Wilson   2020-02-10  238   total = PAGE_SIZE;
6501aa4e3a4507 Matthew Auld   2019-05-29  239   do {
4746fd5c2c1322 Chris Wilson   2020-02-10  240           /* Aim to keep the runtime under reasonable bounds! */
4746fd5c2c1322 Chris Wilson   2020-02-10  241           const u32 max_phys_size = SZ_64K;
0e99f939f08fc3 Matthew Auld   2019-10-25  242           u32 val = prandom_u32_state(prng);
0e99f939f08fc3 Matthew Auld   2019-10-25  243           u32 phys_sz;
0e99f939f08fc3 Matthew Auld   2019-10-25  244           u32 sz;
0e99f939f08fc3 Matthew Auld   2019-10-25  245           u32 *vaddr;
6501aa4e3a4507 Matthew Auld   2019-05-29  246           u32 i;
6501aa4e3a4507 Matthew Auld   2019-05-29  247  
4746fd5c2c1322 Chris Wilson   2020-02-10  248           total = min(total, max);
4746fd5c2c1322 Chris Wilson   2020-02-10  249           sz = i915_prandom_u32_max_state(total, prng) + 1;
c8b56cd01433af Chris Wilson   2020-02-12  250           phys_sz = sz % max_phys_size + 1;
0e99f939f08fc3 Matthew Auld   2019-10-25  251  
6501aa4e3a4507 Matthew Auld   2019-05-29  252           sz = round_up(sz, PAGE_SIZE);
554e330ceb9f00 Matthew Auld   2019-08-10  253           phys_sz = round_up(phys_sz, PAGE_SIZE);
c8b56cd01433af Chris Wilson   2020-02-12  254           phys_sz = min(phys_sz, sz);
6501aa4e3a4507 Matthew Auld   2019-05-29  255  
554e330ceb9f00 Matthew Auld   2019-08-10  256           pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
554e330ceb9f00 Matthew Auld   2019-08-10  257                    phys_sz, sz, val);
6501aa4e3a4507 Matthew Auld   2019-05-29  258  
d61345f342981f Chris Wilson   2020-06-04  259           obj = huge_gem_object(engine->i915, phys_sz, sz);
6501aa4e3a4507 Matthew Auld   2019-05-29  260           if (IS_ERR(obj)) {
fd1e194f4869dc Colin Ian King 2019-05-31  261                   err = PTR_ERR(obj);
6501aa4e3a4507 Matthew Auld   2019-05-29  262                   goto err_flush;
6501aa4e3a4507 Matthew Auld   2019-05-29  263           }
6501aa4e3a4507 Matthew Auld   2019-05-29  264  
6501aa4e3a4507 Matthew Auld   2019-05-29  265           vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
6501aa4e3a4507 Matthew Auld   2019-05-29  266           if (IS_ERR(vaddr)) {
6501aa4e3a4507 Matthew Auld   2019-05-29  267                   err = PTR_ERR(vaddr);
6501aa4e3a4507 Matthew Auld   2019-05-29  268                   goto err_put;
6501aa4e3a4507 Matthew Auld   2019-05-29  269           }
6501aa4e3a4507 Matthew Auld   2019-05-29  270  
6501aa4e3a4507 Matthew Auld   2019-05-29  271           /*
6501aa4e3a4507 Matthew Auld   2019-05-29  272            * Make sure the potentially async clflush does its job, if
6501aa4e3a4507 Matthew Auld   2019-05-29  273            * required.
6501aa4e3a4507 Matthew Auld   2019-05-29  274            */
554e330ceb9f00 Matthew Auld   2019-08-10  275           memset32(vaddr, val ^ 0xdeadbeaf,
554e330ceb9f00 Matthew Auld   2019-08-10  276                    huge_gem_object_phys_size(obj) / sizeof(u32));
6501aa4e3a4507 Matthew Auld   2019-05-29  277  
6501aa4e3a4507 Matthew Auld   2019-05-29  278           if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
6501aa4e3a4507 Matthew Auld   2019-05-29  279                   obj->cache_dirty = true;
6501aa4e3a4507 Matthew Auld   2019-05-29  280  
6501aa4e3a4507 Matthew Auld   2019-05-29  281           err = i915_gem_object_fill_blt(obj, ce, val);
6501aa4e3a4507 Matthew Auld   2019-05-29  282           if (err)
6501aa4e3a4507 Matthew Auld   2019-05-29  283                   goto err_unpin;
6501aa4e3a4507 Matthew Auld   2019-05-29  284  
4746fd5c2c1322 Chris Wilson   2020-02-10  285           err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
6501aa4e3a4507 Matthew Auld   2019-05-29  286           if (err)
6501aa4e3a4507 Matthew Auld   2019-05-29  287                   goto err_unpin;
6501aa4e3a4507 Matthew Auld   2019-05-29  288  
4746fd5c2c1322 Chris Wilson   2020-02-10  289           for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
4746fd5c2c1322 Chris Wilson   2020-02-10  290                   if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
4746fd5c2c1322 Chris Wilson   2020-02-10  291                           drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
4746fd5c2c1322 Chris Wilson   2020-02-10  292  
6501aa4e3a4507 Matthew Auld   2019-05-29  293                   if (vaddr[i] != val) {
6501aa4e3a4507 Matthew Auld   2019-05-29  294                           pr_err("vaddr[%u]=%x, expected=%x\n", i,
6501aa4e3a4507 Matthew Auld   2019-05-29  295                                  vaddr[i], val);
6501aa4e3a4507 Matthew Auld   2019-05-29  296                           err = -EINVAL;
6501aa4e3a4507 Matthew Auld   2019-05-29  297                           goto err_unpin;
6501aa4e3a4507 Matthew Auld   2019-05-29  298                   }
6501aa4e3a4507 Matthew Auld   2019-05-29  299           }
6501aa4e3a4507 Matthew Auld   2019-05-29  300  
6501aa4e3a4507 Matthew Auld   2019-05-29  301           i915_gem_object_unpin_map(obj);
6501aa4e3a4507 Matthew Auld   2019-05-29  302           i915_gem_object_put(obj);
4746fd5c2c1322 Chris Wilson   2020-02-10  303  
4746fd5c2c1322 Chris Wilson   2020-02-10  304           total <<= 1;
6501aa4e3a4507 Matthew Auld   2019-05-29  305   } while (!time_after(jiffies, end));
6501aa4e3a4507 Matthew Auld   2019-05-29  306  
6501aa4e3a4507 Matthew Auld   2019-05-29  307   goto err_flush;
6501aa4e3a4507 Matthew Auld   2019-05-29  308  
6501aa4e3a4507 Matthew Auld   2019-05-29  309  err_unpin:
6501aa4e3a4507 Matthew Auld   2019-05-29  310   i915_gem_object_unpin_map(obj);
6501aa4e3a4507 Matthew Auld   2019-05-29  311  err_put:
6501aa4e3a4507 Matthew Auld   2019-05-29  312   i915_gem_object_put(obj);
6501aa4e3a4507 Matthew Auld   2019-05-29  313  err_flush:
6501aa4e3a4507 Matthew Auld   2019-05-29  314   if (err == -ENOMEM)
6501aa4e3a4507 Matthew Auld   2019-05-29  315           err = 0;
6501aa4e3a4507 Matthew Auld   2019-05-29  316  
0e99f939f08fc3 Matthew Auld   2019-10-25  317   intel_context_put(ce);
6501aa4e3a4507 Matthew Auld   2019-05-29  318   return err;
6501aa4e3a4507 Matthew Auld   2019-05-29  319  }
6501aa4e3a4507 Matthew Auld   2019-05-29  320  
0e99f939f08fc3 Matthew Auld   2019-10-25  321  static int igt_copy_blt_thread(void *arg)
05f219d709ec57 Matthew Auld   2019-08-10  322  {
0e99f939f08fc3 Matthew Auld   2019-10-25  323   struct igt_thread_arg *thread = arg;
d61345f342981f Chris Wilson   2020-06-04  324   struct intel_engine_cs *engine = thread->engine;
0e99f939f08fc3 Matthew Auld   2019-10-25  325   struct rnd_state *prng = &thread->prng;
05f219d709ec57 Matthew Auld   2019-08-10  326   struct drm_i915_gem_object *src, *dst;
0e99f939f08fc3 Matthew Auld   2019-10-25  327   struct i915_gem_context *ctx;
0e99f939f08fc3 Matthew Auld   2019-10-25  328   struct intel_context *ce;
0e99f939f08fc3 Matthew Auld   2019-10-25  329   unsigned int prio;
05f219d709ec57 Matthew Auld   2019-08-10  330   IGT_TIMEOUT(end);
4746fd5c2c1322 Chris Wilson   2020-02-10  331   u64 total, max;
0e99f939f08fc3 Matthew Auld   2019-10-25  332   int err;
05f219d709ec57 Matthew Auld   2019-08-10  333  
34485832cb9872 Chris Wilson   2019-11-11  334   ctx = thread->ctx;
34485832cb9872 Chris Wilson   2019-11-11  335   if (!ctx) {
d61345f342981f Chris Wilson   2020-06-04  336           ctx = live_context_for_engine(engine, thread->file);
34485832cb9872 Chris Wilson   2019-11-11  337           if (IS_ERR(ctx))
34485832cb9872 Chris Wilson   2019-11-11  338                   return PTR_ERR(ctx);
0e99f939f08fc3 Matthew Auld   2019-10-25  339  
0e99f939f08fc3 Matthew Auld   2019-10-25  340           prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
0e99f939f08fc3 Matthew Auld   2019-10-25  341           ctx->sched.priority = I915_USER_PRIORITY(prio);
34485832cb9872 Chris Wilson   2019-11-11  342   }
0e99f939f08fc3 Matthew Auld   2019-10-25  343  
d61345f342981f Chris Wilson   2020-06-04  344   ce = i915_gem_context_get_engine(ctx, 0);
0e99f939f08fc3 Matthew Auld   2019-10-25  345   GEM_BUG_ON(IS_ERR(ce));
05f219d709ec57 Matthew Auld   2019-08-10  346  
4746fd5c2c1322 Chris Wilson   2020-02-10  347   /*
4746fd5c2c1322 Chris Wilson   2020-02-10  348    * If we have a tiny shared address space, like for the GGTT
4746fd5c2c1322 Chris Wilson   2020-02-10  349    * then we can't be too greedy.
4746fd5c2c1322 Chris Wilson   2020-02-10  350    */
4746fd5c2c1322 Chris Wilson   2020-02-10 @351   max = ce->vm->total;
4746fd5c2c1322 Chris Wilson   2020-02-10  352   if (i915_is_ggtt(ce->vm) || thread->ctx)
4746fd5c2c1322 Chris Wilson   2020-02-10  353           max = div_u64(max, thread->n_cpus);
4746fd5c2c1322 Chris Wilson   2020-02-10  354   max >>= 4;
4746fd5c2c1322 Chris Wilson   2020-02-10  355  
4746fd5c2c1322 Chris Wilson   2020-02-10  356   total = PAGE_SIZE;
05f219d709ec57 Matthew Auld   2019-08-10  357   do {
4746fd5c2c1322 Chris Wilson   2020-02-10  358           /* Aim to keep the runtime under reasonable bounds! */
4746fd5c2c1322 Chris Wilson   2020-02-10  359           const u32 max_phys_size = SZ_64K;
0e99f939f08fc3 Matthew Auld   2019-10-25  360           u32 val = prandom_u32_state(prng);
0e99f939f08fc3 Matthew Auld   2019-10-25  361           u32 phys_sz;
0e99f939f08fc3 Matthew Auld   2019-10-25  362           u32 sz;
0e99f939f08fc3 Matthew Auld   2019-10-25  363           u32 *vaddr;
05f219d709ec57 Matthew Auld   2019-08-10  364           u32 i;
05f219d709ec57 Matthew Auld   2019-08-10  365  
4746fd5c2c1322 Chris Wilson   2020-02-10  366           total = min(total, max);
4746fd5c2c1322 Chris Wilson   2020-02-10  367           sz = i915_prandom_u32_max_state(total, prng) + 1;
c8b56cd01433af Chris Wilson   2020-02-12  368           phys_sz = sz % max_phys_size + 1;
0e99f939f08fc3 Matthew Auld   2019-10-25  369  
05f219d709ec57 Matthew Auld   2019-08-10  370           sz = round_up(sz, PAGE_SIZE);
05f219d709ec57 Matthew Auld   2019-08-10  371           phys_sz = round_up(phys_sz, PAGE_SIZE);
c8b56cd01433af Chris Wilson   2020-02-12  372           phys_sz = min(phys_sz, sz);
05f219d709ec57 Matthew Auld   2019-08-10  373  
05f219d709ec57 Matthew Auld   2019-08-10  374           pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
05f219d709ec57 Matthew Auld   2019-08-10  375                    phys_sz, sz, val);
05f219d709ec57 Matthew Auld   2019-08-10  376  
d61345f342981f Chris Wilson   2020-06-04  377           src = huge_gem_object(engine->i915, phys_sz, sz);
05f219d709ec57 Matthew Auld   2019-08-10  378           if (IS_ERR(src)) {
05f219d709ec57 Matthew Auld   2019-08-10  379                   err = PTR_ERR(src);
05f219d709ec57 Matthew Auld   2019-08-10  380                   goto err_flush;
05f219d709ec57 Matthew Auld   2019-08-10  381           }
05f219d709ec57 Matthew Auld   2019-08-10  382  
05f219d709ec57 Matthew Auld   2019-08-10  383           vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
05f219d709ec57 Matthew Auld   2019-08-10  384           if (IS_ERR(vaddr)) {
05f219d709ec57 Matthew Auld   2019-08-10  385                   err = PTR_ERR(vaddr);
05f219d709ec57 Matthew Auld   2019-08-10  386                   goto err_put_src;
05f219d709ec57 Matthew Auld   2019-08-10  387           }
05f219d709ec57 Matthew Auld   2019-08-10  388  
05f219d709ec57 Matthew Auld   2019-08-10  389           memset32(vaddr, val,
05f219d709ec57 Matthew Auld   2019-08-10  390                    huge_gem_object_phys_size(src) / sizeof(u32));
05f219d709ec57 Matthew Auld   2019-08-10  391  
05f219d709ec57 Matthew Auld   2019-08-10  392           i915_gem_object_unpin_map(src);
05f219d709ec57 Matthew Auld   2019-08-10  393  
05f219d709ec57 Matthew Auld   2019-08-10  394           if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
05f219d709ec57 Matthew Auld   2019-08-10  395                   src->cache_dirty = true;
05f219d709ec57 Matthew Auld   2019-08-10  396  
d61345f342981f Chris Wilson   2020-06-04  397           dst = huge_gem_object(engine->i915, phys_sz, sz);
05f219d709ec57 Matthew Auld   2019-08-10  398           if (IS_ERR(dst)) {
05f219d709ec57 Matthew Auld   2019-08-10  399                   err = PTR_ERR(dst);
05f219d709ec57 Matthew Auld   2019-08-10  400                   goto err_put_src;
05f219d709ec57 Matthew Auld   2019-08-10  401           }
05f219d709ec57 Matthew Auld   2019-08-10  402  
05f219d709ec57 Matthew Auld   2019-08-10  403           vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
05f219d709ec57 Matthew Auld   2019-08-10  404           if (IS_ERR(vaddr)) {
05f219d709ec57 Matthew Auld   2019-08-10  405                   err = PTR_ERR(vaddr);
05f219d709ec57 Matthew Auld   2019-08-10  406                   goto err_put_dst;
05f219d709ec57 Matthew Auld   2019-08-10  407           }
05f219d709ec57 Matthew Auld   2019-08-10  408  
05f219d709ec57 Matthew Auld   2019-08-10  409           memset32(vaddr, val ^ 0xdeadbeaf,
05f219d709ec57 Matthew Auld   2019-08-10  410                    huge_gem_object_phys_size(dst) / sizeof(u32));
05f219d709ec57 Matthew Auld   2019-08-10  411  
05f219d709ec57 Matthew Auld   2019-08-10  412           if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
05f219d709ec57 Matthew Auld   2019-08-10  413                   dst->cache_dirty = true;
05f219d709ec57 Matthew Auld   2019-08-10  414  
05f219d709ec57 Matthew Auld   2019-08-10  415           err = i915_gem_object_copy_blt(src, dst, ce);
05f219d709ec57 Matthew Auld   2019-08-10  416           if (err)
05f219d709ec57 Matthew Auld   2019-08-10  417                   goto err_unpin;
05f219d709ec57 Matthew Auld   2019-08-10  418  
4746fd5c2c1322 Chris Wilson   2020-02-10  419           err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
05f219d709ec57 Matthew Auld   2019-08-10  420           if (err)
05f219d709ec57 Matthew Auld   2019-08-10  421                   goto err_unpin;
05f219d709ec57 Matthew Auld   2019-08-10  422  
4746fd5c2c1322 Chris Wilson   2020-02-10  423           for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
4746fd5c2c1322 Chris Wilson   2020-02-10  424                   if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
4746fd5c2c1322 Chris Wilson   2020-02-10  425                           drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
4746fd5c2c1322 Chris Wilson   2020-02-10  426  
05f219d709ec57 Matthew Auld   2019-08-10  427                   if (vaddr[i] != val) {
05f219d709ec57 Matthew Auld   2019-08-10  428                           pr_err("vaddr[%u]=%x, expected=%x\n", i,
05f219d709ec57 Matthew Auld   2019-08-10  429                                  vaddr[i], val);
05f219d709ec57 Matthew Auld   2019-08-10  430                           err = -EINVAL;
05f219d709ec57 Matthew Auld   2019-08-10  431                           goto err_unpin;
05f219d709ec57 Matthew Auld   2019-08-10  432                   }
05f219d709ec57 Matthew Auld   2019-08-10  433           }
05f219d709ec57 Matthew Auld   2019-08-10  434  
05f219d709ec57 Matthew Auld   2019-08-10  435           i915_gem_object_unpin_map(dst);
05f219d709ec57 Matthew Auld   2019-08-10  436  
05f219d709ec57 Matthew Auld   2019-08-10  437           i915_gem_object_put(src);
05f219d709ec57 Matthew Auld   2019-08-10  438           i915_gem_object_put(dst);
4746fd5c2c1322 Chris Wilson   2020-02-10  439  
4746fd5c2c1322 Chris Wilson   2020-02-10  440           total <<= 1;
05f219d709ec57 Matthew Auld   2019-08-10  441   } while (!time_after(jiffies, end));
05f219d709ec57 Matthew Auld   2019-08-10  442  
05f219d709ec57 Matthew Auld   2019-08-10  443   goto err_flush;
05f219d709ec57 Matthew Auld   2019-08-10  444  
05f219d709ec57 Matthew Auld   2019-08-10  445  err_unpin:
05f219d709ec57 Matthew Auld   2019-08-10  446   i915_gem_object_unpin_map(dst);
05f219d709ec57 Matthew Auld   2019-08-10  447  err_put_dst:
05f219d709ec57 Matthew Auld   2019-08-10  448   i915_gem_object_put(dst);
05f219d709ec57 Matthew Auld   2019-08-10  449  err_put_src:
05f219d709ec57 Matthew Auld   2019-08-10  450   i915_gem_object_put(src);
05f219d709ec57 Matthew Auld   2019-08-10  451  err_flush:
05f219d709ec57 Matthew Auld   2019-08-10  452   if (err == -ENOMEM)
05f219d709ec57 Matthew Auld   2019-08-10  453           err = 0;
05f219d709ec57 Matthew Auld   2019-08-10  454  
0e99f939f08fc3 Matthew Auld   2019-10-25  455   intel_context_put(ce);
05f219d709ec57 Matthew Auld   2019-08-10  456   return err;
05f219d709ec57 Matthew Auld   2019-08-10  457  }
05f219d709ec57 Matthew Auld   2019-08-10  458  
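
From smatch's point of view, what changed with the bisected commit is whether
the GEM_BUG_ON() assertion terminates the error path. A rough illustration
(hypothetical macros for contrast, not the actual GEM_BUG_ON() definition):

	/* fatal assertion: an ERR_PTR value can never reach the dereference */
	#define ASSERT_FATAL(expr)	BUG_ON(expr)

	/*
	 * non-fatal assertion: execution continues, so the 'ce->vm' access at
	 * the @233 and @351 lines above is still reachable with an ERR_PTR()
	 * value, which is exactly what smatch reports
	 */
	#define ASSERT_NONFATAL(expr)	WARN_ON(expr)

The warnings in i915_gem_context.c and i915_request.c presumably come from the
same GEM_BUG_ON(IS_ERR(ce)) construct in those selftests.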

:::::: The code at line 233 was first introduced by commit
:::::: 4746fd5c2c132225bdd817ddf04e8454e872f8c2 drm/i915/selftests: Trim blitter block size

:::::: TO: Chris Wilson <ch...@chris-wilson.co.uk>
:::::: CC: Chris Wilson <ch...@chris-wilson.co.uk>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org
