tree:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/cache
head:   f61050aefc0ca1c0b3e93114eadd0a910a66202b
commit: 0438fb1aebf428efcdce64ef4ec610e93e0006f9 [29/38] x86/intel_rdt: Pseudo-lock region creation/removal core
config: i386-allmodconfig (attached as .config)
compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
reproduce:
        git checkout 0438fb1aebf428efcdce64ef4ec610e93e0006f9
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c: In function 'pseudo_lock_fn':
>> arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c:363:1: warning: unsupported size for integer register
    }
    ^
>> arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c:363:1: warning: unsupported size for integer register

vim +363 arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c

   280  
   281          /*
   282           * Make sure none of the allocated memory is cached. If it is we
   283           * will get a cache hit in below loop from outside of pseudo-locked
   284           * region.
   285           * wbinvd (as opposed to clflush/clflushopt) is required to
   286           * increase likelihood that allocated cache portion will be filled
   287           * with associated memory.
   288           */
   289          native_wbinvd();
   290  
   291          /*
   292           * Always called with interrupts enabled. By disabling interrupts
   293           * ensure that we will not be preempted during this critical section.
   294           */
   295          local_irq_disable();
   296  
   297          /*
   298           * Call wrmsr and rdmsr as directly as possible to avoid tracing
   299           * clobbering local register variables or affecting cache accesses.
   300           *
   301           * Disable the hardware prefetcher so that when the end of the memory
   302           * being pseudo-locked is reached the hardware will not read beyond
   303           * the buffer and evict pseudo-locked memory read earlier from the
   304           * cache.
   305           */
   306          __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
   307          closid_p = this_cpu_read(pqr_state.cur_closid);
   308          rmid_p = this_cpu_read(pqr_state.cur_rmid);
   309          mem_r = plr->kmem;
   310          size = plr->size;
   311          line_size = plr->line_size;
   312          /*
   313           * Critical section begin: start by writing the closid associated
   314           * with the capacity bitmask of the cache region being
   315           * pseudo-locked followed by reading of kernel memory to load it
   316           * into the cache.
   317           */
   318          __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
   319          /*
   320           * Cache was flushed earlier. Now access kernel memory to read it
   321           * into cache region associated with just activated plr->closid.
   322           * Loop over data twice:
   323           * - In first loop the cache region is shared with the page walker
   324           *   as it populates the paging structure caches (including TLB).
   325           * - In the second loop the paging structure caches are used and
   326           *   cache region is populated with the memory being referenced.
   327           */
   328          for (i = 0; i < size; i += PAGE_SIZE) {
   329                  /*
   330                   * Add a barrier to prevent speculative execution of this
   331                   * loop reading beyond the end of the buffer.
   332                   */
   333                  rmb();
   334                  asm volatile("mov (%0,%1,1), %%eax\n\t"
   335                          :
   336                          : "r" (mem_r), "r" (i)
   337                          : "%eax", "memory");
   338          }
   339          for (i = 0; i < size; i += line_size) {
   340                  /*
   341                   * Add a barrier to prevent speculative execution of this
   342                   * loop reading beyond the end of the buffer.
   343                   */
   344                  rmb();
   345                  asm volatile("mov (%0,%1,1), %%eax\n\t"
   346                          :
   347                          : "r" (mem_r), "r" (i)
   348                          : "%eax", "memory");
   349          }
   350          /*
   351           * Critical section end: restore closid with capacity bitmask that
   352           * does not overlap with pseudo-locked region.
   353           */
   354          __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
   355  
   356          /* Re-enable the hardware prefetcher(s) */
   357          wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
   358          local_irq_enable();
   359  
   360          plr->thread_done = 1;
   361          wake_up_interruptible(&plr->lock_thread_wq);
   362          return 0;
 > 363  }
   364  
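
For context: both warnings point at the function's closing brace rather
than at the asm statements themselves, which suggests gcc diagnoses the
operands while emitting the function body. On a 32-bit build the usual
trigger for "unsupported size for integer register" is a 64-bit value
passed through a single-register "r" constraint that the template then
prints as one integer register. A minimal, hypothetical sketch along
those lines (the file name and the u64 loop index are assumptions, not
taken from the patch):

        /* repro.c - sketch only; assumes the warning stems from a
         * 64-bit asm operand on i386.
         * Build with: gcc -m32 -O2 -c repro.c
         */
        typedef unsigned long long u64;

        void touch(void *mem_r)
        {
                /* 64 bits need a register pair on i386, so %1 below
                 * has no single integer register gcc could print.
                 */
                u64 i = 0;

                asm volatile("mov (%0,%1,1), %%eax\n\t"
                             :
                             : "r" (mem_r), "r" (i)
                             : "%eax", "memory");
        }

If something of this shape is the cause here, each of the two read
loops above would emit the warning once, which would match the two
reports.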

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
