On Sat, Jul 29, 2023 at 03:23:47PM +0200, Peter J. Philipp wrote:
> Hi,
> 
> For a few hours I went grepping for MOD FPE conditions in the source code.
> I did this systematically, examining them, and here are my recommendations in
> the form of patches for these spots.  It's half the effort, but I'm really
> wasted right now and can't go on.  Perhaps another time I'll continue.
> 
> grep Logfile with comments below my signature.  It is 20 patches, goes to
> ENDHERE

Here is the second part; I put a few more hours of work into this... I'm going
to take a break now.  There is a theoretical denial of service I found: a
specially crafted wifi frame can advertise a beacon interval of 0.  Please
give that some attention.
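
Every spot below is the same failure mode: an integer division or modulo whose
divisor can end up 0, which traps in the kernel.  As a minimal sketch of the
guard idiom the patches apply (generic C, not taken from any particular
driver; 'count' stands in for whatever hardware or the wire handed us):

	/* hypothetical helper; count may be 0 if firmware/wire data is garbage */
	static inline unsigned int
	ring_next(unsigned int cur, unsigned int count)
	{
		if (count == 0)			/* guard first, as the patches do */
			return (0);
		return ((cur + 1) % count);	/* safe: count != 0 here */
	}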

Best Regards,
-peter

ENDHERE, LATERMORE

dev/ic/malo.c:              (i + 1) % count * sizeof(struct malo_rx_desc));
dev/ic/malo.c:              (i + 1) % count * sizeof(struct malo_tx_desc));

*safe

dev/ic/mfi.c:   disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

--- mfi.c       Fri Oct 21 07:20:48 2022
+++ /tmp/tfile  Sun Jul 30 10:42:45 2023
@@ -1820,7 +1820,11 @@
        arr = ld[vol].mlc_span[span].mls_index;
 
        /* offset disk into pd list */
-       disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
+       if (ld[vol].mlc_parm.mpa_span_depth > 1)        /* XXX */
+               disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
+       else
+               disk = 0;
+
        bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
 
        /* get status */


dev/ic/nvme.c:  isqe->cid = blkno % 0xffff;
dev/ic/nvme.c:                      (icqe->cid != blkno % 0xffff))

*safe


dev/ic/rt2661.c:                if (++txq->next >= txq->count)  /* faster than % count */

*safe

dev/ic/rtw.c:   for (next = rdb->rdb_next; ; next = (next + 1) % rdb->rdb_ndesc) {
dev/ic/rtw.c:       ("%s: read % from reg[CONFIG1]\n", __func__, cfg1));
dev/ic/rtw.c:       ("%s: read % from reg[%#02]\n", __func__, val,
dev/ic/rtw.c:       ("%s: wrote % to reg[%#02]\n", __func__, val,
dev/ic/rtw.c:   remainder = (bitlen * 2) % rate;
dev/ic/rtw.c:   lastlen0 = paylen % fraglen;

--- rtw.c       Fri Oct 21 07:20:48 2022
+++ /tmp/tfile  Sun Jul 30 10:50:21 2023
@@ -3000,6 +3000,9 @@
        else
                overlen = IEEE80211_CRC_LEN;
 
+       if (fraglen == 0)
+               return -1;
+
        npkt = paylen / fraglen;
        lastlen0 = paylen % fraglen;
 


dev/ic/sti.c:                   a.in.y = ((uc - fp->first) % scr->scr_fontmaxcol) *
dev/ic/sti.c:           a.in.srcy = ((uc - fp->first) % scr->scr_fontmaxcol) *

--- sti.c       Fri Oct 21 07:20:48 2022
+++ /tmp/tfile  Sun Jul 30 10:57:18 2023
@@ -1366,6 +1366,11 @@
                a.in.fg_colour = fg;
                a.in.bg_colour = bg;
 
+               /* XXX make sure we don't FPE */
+               if (scr->scr_fontmaxcol == 0) {
+                       return -1;
+               }
+
                a.in.srcx = ((uc - fp->first) / scr->scr_fontmaxcol) *
                    fp->width + scr->scr_fontbase;
                a.in.srcy = ((uc - fp->first) % scr->scr_fontmaxcol) *



dev/ic/tireg.h: (x) = (x + 1) % y;                                      \
dev/ic/tireg.h:       (TI_JRAWLEN % sizeof(u_int64_t))))

*safe

dev/ic/vga.c:   scr->pcs.vc_ccol = cpos % type->ncols;
dev/ic/vga.c:           p = (scr->pcs.visibleoffset - ul + we) % we + lines *
dev/ic/vga.c:           st = (scr->pcs.dispoffset - ul + we) % we;
dev/ic/vga.c:           scr->pcs.visibleoffset = (p + ul) % we;

--- vga.c       Fri May 28 01:24:40 2021
+++ /tmp/tfile  Sun Jul 30 11:08:17 2023
@@ -430,6 +430,10 @@
        scr->pcs.visibleoffset = scr->pcs.dispoffset;
        scr->vga_rollover = 0;
 
+       /* avoid FPE on non-alpha archs */
+       if (type->ncols == 0)
+               return;
+
        scr->pcs.vc_crow = cpos / type->ncols;
        scr->pcs.vc_ccol = cpos % type->ncols;
        pcdisplay_cursor_init(&scr->pcs, existing);
@@ -965,6 +969,9 @@
                if (scr->vga_rollover > vga_scr_end + margin) {
                        ul = vga_scr_end;
                        we = scr->vga_rollover + scr->pcs.type->ncols * 2;
+                       /* avoid FPE */
+                       if (we == 0)
+                               return;
                } else {
                        ul = 0;
                        we = 0x8000;



dev/ic/w83l518d_sdmmc.c:                if (cmd->c_datalen % blklen > 0) {


--- w83l518d_sdmmc.c    Wed Jan 22 04:26:02 2020
+++ /tmp/tfile  Sun Jul 30 11:11:22 2023
@@ -443,10 +443,16 @@
                        goto done;
                }
 
+               /* FPE avoidance */
+               if (cmd->c_blklen == 0) {
+                       cmd->c_error = EIO;
+                       goto done;
+               }
+
                /* Fragment the data into proper blocks */
                blklen = MIN(cmd->c_datalen, cmd->c_blklen);
 
-               if (cmd->c_datalen % blklen > 0) {
+               if ((cmd->c_datalen % blklen) > 0) {
                        printf("%s: data is not a multiple of %u bytes\n",
                            wb->wb_dev.dv_xname, blklen);
                        cmd->c_error = EINVAL;



dev/ic/wdc.c:                   tail = (tail + rec_size) % wdc_log_cap;
dev/ic/wdc.c:   wdc_head = (head + request_size) % wdc_log_cap;
dev/ic/wdc.c:           tail = (tail + rec_size) % wdc_log_cap;

*safe

dev/isa/fd.c:       (((bp->b_blkno % bf) != 0 ||
dev/isa/fd.c:         (bp->b_bcount % fd_bsize) != 0) &&
dev/isa/fd.c:           sec = fd->sc_blkno % type->seccyl;


--- fd.c        Fri Oct 21 07:20:49 2022
+++ /tmp/tfile  Sun Jul 30 11:21:51 2023
@@ -389,6 +389,12 @@
        int fd_bsize = FD_BSIZE(fd);
        int bf = fd_bsize / DEV_BSIZE;
 
+       /* FPE avoidance */
+       if (fd_bsize == 0) {
+               bp->b_error = EIO;
+               goto bad;
+       }
+
        /* Valid unit, controller, and request? */
        if (bp->b_blkno < 0 ||
            (((bp->b_blkno % bf) != 0 ||
@@ -745,6 +751,11 @@
                if (finfo)
                    fd->sc_skip = (char *)&(finfo->fd_formb_cylno(0)) -
                        (char *)finfo;
+
+               /* FPE avoidance */
+               if (type->seccyl == 0)
+                       return 1;
+               
                sec = fd->sc_blkno % type->seccyl;
                nblks = type->seccyl - sec;
                nblks = min(nblks, fd->sc_bcount / fd_bsize);



dev/isa/gus.c:  sc->sc_dmabuf = (sc->sc_dmabuf + 1) % sc->sc_nbufs;
dev/isa/gus.c:                      sc->sc_playbuf = (sc->sc_playbuf + 1) % sc->sc_nbufs;
dev/isa/gus.c:  sc->sc_playbuf = (sc->sc_playbuf + 1) % sc->sc_nbufs;

--- gus.c       Tue Apr 11 06:30:34 2023
+++ /tmp/tfile  Sun Jul 30 11:30:42 2023
@@ -833,6 +833,12 @@
            }
        }
        gus_bufcnt[sc->sc_bufcnt-1]++;
+
+
+       /* FPE avoidance */
+       if (sc->sc_nbufs == 0)
+               return;
+
        /*
         * flip to the next DMA buffer
         */
@@ -927,6 +933,10 @@
                                   sc->sc_dev.dv_xname, sc->sc_bufcnt);
                            gus_falsestops++;
 
+                               /* FPE avoidance */
+                               if (sc->sc_nbufs == 0)
+                                               return -1;
+                               
                            sc->sc_playbuf = (sc->sc_playbuf + 1) % sc->sc_nbufs;
                            gus_start_playing(sc, sc->sc_playbuf);
                        } else if (sc->sc_bufcnt < 0) {
@@ -1090,6 +1100,10 @@
 
        SELECT_GUS_REG(iot, ioh2, GUSREG_VOICE_CNTL);
 bus_space_write_1(iot, ioh2, GUS_DATA_HIGH, sc->sc_voc[voice].voccntl & ~(GUSMASK_VOICE_IRQ));
+
+       /* FPE avoidance */
+       if (sc->sc_nbufs == 0)
+               return -1;
 
        /*
         * update playbuf to point to the buffer the hardware just started


dev/isa/pcdisplay.c:    dc->pcs.vc_ccol = cpos % pcdisplay_scr.ncols;

--- pcdisplay.c Fri Oct 21 07:20:49 2022
+++ /tmp/tfile  Sun Jul 30 11:34:54 2023
@@ -203,6 +203,10 @@
        dc->pcs.dispoffset = 0;
        dc->pcs.visibleoffset = 0;
 
+       /* FPE avoidance */
+       if (pcdisplay_scr.ncols == 0)
+               return;
+
        dc->pcs.vc_crow = cpos / pcdisplay_scr.ncols;
        dc->pcs.vc_ccol = cpos % pcdisplay_scr.ncols;
        pcdisplay_cursor_init(&dc->pcs, 1);

dev/ofw/fdt.c:                  if ((cnt % sizeof(u_int32_t)) == 0)
dev/ofw/fdt.c:  if (len < 0 || (len % sizeof(uint32_t)))
dev/ofw/fdt.c:  if (len < 0 || (len % sizeof(uint64_t)))

*safe

dev/pci/arc.c:                  rwlen = (wlen - wdone) % sizeof(rwbuf);

*safe 

dev/pci/azalia.c:       blksz -= blksz % mult;

--- azalia.c    Tue Apr 11 06:30:38 2023
+++ /tmp/tfile  Sun Jul 30 11:38:22 2023
@@ -4007,7 +4007,9 @@
        /* must be multiple of 128 bytes */
        mult = audio_blksz_bytes(mode, p, r, 128);
 
-       blksz -= blksz % mult;
+       if (mult != 0)  /* XXX FPE avoidance */
+               blksz -= blksz % mult;
+
        if (blksz == 0)
                blksz = mult;
 


dev/pci/azalia_codec.c: max_gain = AUDIO_MAX_GAIN - AUDIO_MAX_GAIN % steps;
dev/pci/azalia_codec.c: max_gain = AUDIO_MAX_GAIN - AUDIO_MAX_GAIN % steps;

*safe


dev/pci/drm/drm_atomic.c:        * record of happily burning through 100% cpu 
(or worse, crash) when the
dev/pci/drm/drm_atomic_uapi.c:              new_blob->length % 
expected_elem_size != 0) {
dev/pci/drm/drm_fb_helper.c:            off_t bit_off = (off % 
info->fix.line_length) * 8;
dev/pci/drm/drm_fb_helper.c:            off_t bit_end = (end % 
info->fix.line_length) * 8;
dev/pci/drm/display/drm_dp_helper.c:     * - FxP is within 25% of desired value.
dev/pci/drm/display/drm_dp_helper.c:     *   Note: 25% is arbitrary value and 
may need some tweak.
dev/pci/drm/display/drm_dp_helper.c:    /* Ensure frequency is within 25% of 
desired value */
dev/pci/drm/display/drm_dp_mst_topology.c:       * margin 5300ppm + 300ppm ~ 
0.6% as per spec, factor is 1.006
dev/pci/drm/display/drm_dsc_helper.c:          ((slice_bits - 
num_extra_mux_bits) % vdsc_cfg->mux_word_size))
dev/pci/drm/linux_sort.c:#define SWAPINIT(a, es) swaptype = ((char *)a - (char 
*)0) % sizeof(long) || \
dev/pci/drm/linux_sort.c:       es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 
1;
dev/pci/drm/drm_linux.c:                if ((i % rowsize) == 0)
dev/pci/drm/drm_linux.c:                if ((i % rowsize) == (rowsize - 1))
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c: uint32_t pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c: pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c: pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:       uint32_t pipe = 
(pipe_id % adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:       pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:               pipe = (pipe_id 
% adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:       pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c:  uint32_t pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c:  pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:  uint32_t pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:  pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:          pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:  uint32_t pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:  pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:  pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:  queue_slot = queue_idx % 
adev->gfx.mec.num_queue_per_pipe;
dev/pci/drm/amd/amdgpu/amdgpu_atom.c:           ctx->ctx->divmul[1] = dst % src;
dev/pci/drm/amd/amdgpu/amdgpu_display.c:        if (rfb->base.pitches[plane] % 
block_pitch) {
dev/pci/drm/amd/amdgpu/amdgpu_display.c:        if (rfb->base.offsets[plane] % 
block_size) {
dev/pci/drm/amd/amdgpu/amdgpu_gfx.c:    *queue = bit % 
adev->gfx.mec.num_queue_per_pipe;
dev/pci/drm/amd/amdgpu/amdgpu_gfx.c:            % 
adev->gfx.mec.num_pipe_per_mec;
dev/pci/drm/amd/amdgpu/amdgpu_gfx.c:    *queue = bit % 
adev->gfx.me.num_queue_per_pipe;
dev/pci/drm/amd/amdgpu/amdgpu_gfx.c:            % adev->gfx.me.num_pipe_per_me;
dev/pci/drm/amd/amdgpu/amdgpu_gfx.c:                    pipe = i % 
adev->gfx.mec.num_pipe_per_mec;
dev/pci/drm/amd/amdgpu/amdgpu_gfx.c:                    pipe = i % 
adev->gfx.me.num_pipe_per_me;
dev/pci/drm/amd/amdgpu/amdgpu_psp.c:    if ((psp_write_ptr_reg % ring_size_dw) 
== 0)
dev/pci/drm/amd/amdgpu/amdgpu_psp.c:    psp_write_ptr_reg = (psp_write_ptr_reg 
+ rb_frame_size_dw) % ring_size_dw;
dev/pci/drm/amd/amdgpu/amdgpu_ras.c:                            data->rptr) % 
data->ring_size;
dev/pci/drm/amd/amdgpu/amdgpu_ras.c:                    data->wptr) % 
data->ring_size;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:             g1 = b % 
control->ras_max_record_count + 1;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:                     
control->ras_fri = g1 % control->ras_max_record_count;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:                             
control->ras_fri = (b + 1) % control->ras_max_record_count;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:                     
control->ras_fri = g1 % control->ras_max_record_count;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:             % 
control->ras_max_record_count;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:     g1 = g0 % 
control->ras_max_record_count;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:             r = r % 
rec_hdr_fmt_size;
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:             /* Warn if we are at 
90% of the threshold or above
dev/pci/drm/amd/amdgpu/amdgpu_ras_eeprom.c:                     
dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
dev/pci/drm/amd/amdgpu/amdgpu_sa.c:     wasted = (align - (soffset % align)) % 
align;
dev/pci/drm/amd/amdgpu/amdgpu_sa.c:     wasted = (align - (soffset % align)) % 
align;
dev/pci/drm/amd/amdgpu/gfx_v8_0.c:       * around 25% of gpu resources.
dev/pci/drm/amd/amdgpu/gfx_v9_0.c:       * around 25% of gpu resources.
dev/pci/drm/amd/amdgpu/gfx_v9_4_2.c:    mem = instance % blk->num_mem_blocks;
dev/pci/drm/amd/amdgpu/gfx_v9_4_2.c:    way = (instance / blk->num_mem_blocks) 
% blk->num_ways;
dev/pci/drm/amd/amdgpu/gfx_v9_4_2.c:            wave = i % 
cu_info->max_waves_per_simd;
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c: uint32_t pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c: pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c:         pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c: pipe = (pipe_id % 
adev->gfx.mec.num_pipe_per_mec);
dev/pci/drm/amd/amdkfd/kfd_device.c:            new_cpu = cpumask_next(new_cpu, 
cpu_online_mask) % nr_cpu_ids;
dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c:                      pipe = 
((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c:      
dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c:                      
q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
dev/pci/drm/amd/amdkfd/kfd_kernel_queue.c:                      wptr = (wptr + 
1) % queue_size_dwords;
dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c:                
new_blob->length % expected_elem_size != 0) {
dev/pci/drm/amd/display/dc/dc_dp_types.h:       /* 0.5 % downspread 30 kHz */
dev/pci/drm/amd/display/dc/dc_dp_types.h:       /* 0.5 % downspread 33 kHz */
dev/pci/drm/amd/display/dc/basics/conversion.c:         remainder = a % b;
dev/pci/drm/amd/display/dc/basics/fixpt31_32.c: * *remainder = dividend % 
divisor
dev/pci/drm/amd/display/dc/bios/bios_parser.c:           * value 3% for 
bandwidth calculation */
dev/pci/drm/amd/display/dc/bios/bios_parser.c:           * value 3% for 
bandwidth calculation */
dev/pci/drm/amd/display/dc/bios/bios_parser.c:           *  conservative value 
3% for bandwidth calculation */
dev/pci/drm/amd/display/dc/bios/bios_parser.c:           * conservative value 
3% for bandwidth calculation */
dev/pci/drm/amd/display/dc/bios/bios_parser.c:           *  conservative value 
3% for bandwidth calculation */
dev/pci/drm/amd/display/dc/bios/bios_parser.c:           * conservative value 
3% for bandwidth calculation */
dev/pci/drm/amd/display/dc/core/dc_link.c:       * margin 5300ppm + 300ppm ~ 
0.6% as per spec, factor is 1.006
dev/pci/drm/amd/display/dc/core/dc_link.c:               * data bandwidth 
efficiency is 80% with additional 3% overhead if FEC is supported.
dev/pci/drm/amd/display/dc/core/dc_link_dp.c:   uint32_t bytes_per_pixel_mod = 
config->bytes_per_pixel % precision;
dev/pci/drm/amd/display/dc/core/dc_link_dp.c:           
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dev/pci/drm/amd/display/dc/core/dc_resource.c:  ASSERT(src.x % vpc_div == 0 && 
src.y % vpc_div == 0);
dev/pci/drm/amd/display/dc/dce/dce_clock_source.c:      /*structure for HDMI, 
no SS or SS% <= 0.06% for 27 MHz Ref clock */
dev/pci/drm/amd/display/dc/dce110/dce110_hw_sequencer.c:                
early_control = active_total_with_borders % lane_count;
dev/pci/drm/amd/display/dc/dce112/dce112_compressor.c:                  
(rows_per_channel % lpt_alignment) ?
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c:                  us_x10 
% frac);
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c:  *end_line = 
(*start_line + 2) % timing->v_total;
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c:          *end_line = 
(*start_line + 2) % timing->v_total;
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c:          *end_line = 
(*start_line + 2) % timing->v_total;
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
(s->data_urgent * frac) / ref_clk_mhz / frac, (s->data_urgent * frac) / 
ref_clk_mhz % frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
(s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / 
ref_clk_mhz % frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
(s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % 
frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
(s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % 
frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
(s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) 
/ ref_clk_mhz % frac);
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
                (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, 
(s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
                (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, 
(s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
                (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, 
(s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
                (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, 
(s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
                (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, 
(s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
dev/pci/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c:                    
                (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, 
(s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
dev/pci/drm/amd/display/dc/dcn20/dcn20_hwseq.c:         early_control = 
active_total_with_borders % lane_count;
dev/pci/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c:                      
                ((unsigned int) (prefill - 2.0) % swath_height);
dev/pci/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c:      
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dml_inline_defs.h:       remainder = num % 
multiple;
dev/pci/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c:     if ((ix % w) == 
0 && p != 0)
dev/pci/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c:   if ((ix % w) == 
0 && p != 0)
dev/pci/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c:  
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c:        
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c:     if ((ix % w) == 
0 && p != 0)
dev/pci/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c:                     
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c:  
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:     if ((ix % w) == 
0 && P != 0)
dev/pci/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:                     
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:                     
        meta_row_remainder_chroma = meta_row_width_chroma[k] % 
meta_chunk_width_chroma;
dev/pci/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c:  
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c:     if ((ix % w) == 
0 && P != 0)
dev/pci/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c:                     
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c:                     
        meta_row_remainder_chroma = meta_row_width_chroma[k] % 
meta_chunk_width_chroma;
dev/pci/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c:  
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/calcs/dce_calcs.c:       /*for the bottom 
interlace field an extra 50% of the vertical scale ratio is considered for this 
calculation.*/
dev/pci/drm/amd/display/dc/dml/calcs/dce_calcs.c:                               
/*the memory efficiency will be 50% for the 32 byte sized data.*/
dev/pci/drm/amd/display/dc/dml/calcs/dce_calcs.c:       /*during self-refresh, 
sclk can be reduced to dispclk divided by the minimum pixels in the data fifo 
entry, with 15% margin, but shoudl not be set to less than the request 
bandwidth.*/
dev/pci/drm/amd/display/dc/dml/calcs/dcn_calcs.c:                * slow-slow 
corner + 10% margin with voltages aligned to FCLK.
dev/pci/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c:   if ((ix % w) == 
0 && P != 0)
dev/pci/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c:                   
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c:                   
        meta_row_remainder_chroma = meta_row_width_chroma[k] % 
meta_chunk_width_chroma;
dev/pci/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c:        
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c:        if ((ix 
% w) == 0 && p != 0)
dev/pci/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c:                
        meta_row_remainder = meta_row_width[k] % meta_chunk_width;
dev/pci/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c:                
                meta_row_remainder_chroma = meta_row_width_chroma[k] % 
meta_chunk_width_chroma;
dev/pci/drm/amd/display/dc/dsc/dc_dsc.c:                if (pic_width % 
max_slices_h == 0)
dev/pci/drm/amd/display/dc/dsc/dc_dsc.c:        if (pic_width % 
dsc_common_caps.max_slice_width)
dev/pci/drm/amd/display/dc/dsc/dc_dsc.c:        if (pic_width % min_slices_h != 
0)
dev/pci/drm/amd/display/dc/dsc/dc_dsc.c:        while (slice_height < 
pic_height && (pic_height % slice_height != 0 ||
dev/pci/drm/amd/display/dc/inc/clock_source.h:  uint32_t percentage;            
/*> In unit of 0.01% or 0.001%*/
dev/pci/drm/amd/display/dc/dcn314/dcn314_hwseq.c:               
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dev/pci/drm/amd/display/dc/dcn32/dcn32_hwseq.c: if (cache_lines_used % 
lines_per_way > 0)
dev/pci/drm/amd/display/dc/dcn32/dcn32_hwseq.c:         
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dev/pci/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c:      if 
(cache_lines_used % lines_per_way > 0)
dev/pci/drm/amd/display/include/grph_object_ctrl_defs.h:        otherwise in 
0.001% units (spreadPercentageDivider = 1000); */
dev/pci/drm/amd/display/include/link_service_types.h:   
DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */
dev/pci/drm/amd/display/include/link_service_types.h:   
DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when 
FEC is enabled */
dev/pci/drm/amd/display/include/link_service_types.h:   
DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% 
downspread factor */
dev/pci/drm/amd/display/modules/hdcp/hdcp_log.c:                        
trailing_bytes = msg_size % bytes_per_line;
dev/pci/drm/amd/display/modules/hdcp/hdcp_log.c:                        if (i % 
bytes_per_line == 0)
dev/pci/drm/amd/display/modules/power/power_helpers.c:  /* Setup all brightness 
levels between 0% and 100% exclusive
dev/pci/drm/amd/display/modules/power/power_helpers.c:  /* Setup all brightness 
levels between 0% and 100% exclusive
dev/pci/drm/amd/include/amd_acpi.h: * BYTE  - min input signal, in range 0-255, 
corresponding to 0% backlight
dev/pci/drm/amd/include/amd_acpi.h: * BYTE  - max input signal, in range 0-255, 
corresponding to 100% backlight
dev/pci/drm/amd/include/amd_acpi.h: * BYTE  - input signal in range 0-255 / 
does not have entries for 0% and 100%
dev/pci/drm/amd/include/atombios.h:  USHORT              
usSpreadSpectrumPercentage;      //in unit of 0.01% or 0.001%, decided by 
ucSpreadSpectrumMode bit4
dev/pci/drm/amd/include/pptable.h:    USHORT  usPWMMin;                        
// The minimum PWM value in percent (0.01% increments).
dev/pci/drm/amd/pm/powerplay/inc/hwmgr.h:       uint16_t  usPWMMin;             
           /* The minimum PWM value in percent (0.01% increments). */
dev/pci/drm/amd/pm/powerplay/inc/hwmgr.h:       uint16_t  usFanRPMMaxLimit;     
           /* Maximum limit range in percentage, usually set to 100% by default 
*/
dev/pci/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h:      USHORT  usPWMMin;       
                                         /* The minimum PWM value in percent 
(0.01% increments). */
dev/pci/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h:      USHORT  usPWMMin;       
                                         /* The minimum PWM value in percent 
(0.01% increments). */
dev/pci/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h:      USHORT  usPWMMin;       
                                         /* The minimum PWM value in percent 
(0.01% increments). */
dev/pci/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c:                      
"percent sclk value must range from 1% to 100%, setting default value",
dev/pci/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c:/* [2.5%,~2.5%] Clock 
stretched is multiple of 2.5% vs
dev/pci/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c:   /* Indicates maximum 
activity level for this performance level. 50% for now*/
dev/pci/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c: pcc_target_percent = 
10; /*  Hardcode 10% for now. */
dev/pci/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c: ss_target_percent = 2; 
/*  Hardcode 2% for now. */
dev/pci/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c:     /* Indicates maximum 
activity level for this performance level. 50% for now*/
dev/pci/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c:     pcc_target_percent = 
10; /*  Hardcode 10% for now. */
dev/pci/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c:     ss_target_percent = 2; 
/*  Hardcode 2% for now. */
dev/pci/drm/amd/pm/swsmu/inc/amdgpu_smu.h:       * @get_gfx_off_residency: 
Average GFXOFF residency % during the logging interval
dev/pci/drm/drm_modes.c:        /* 1) top/bottom margin size (% of height) - 
default: 1.8, */
dev/pci/drm/drm_modes.c:                /* 3) Nominal HSync width (% of line 
period) - default 8 */
dev/pci/drm/drm_modes.c:{       /* 1) top/bottom margin size (% of height) - 
default: 1.8, */
dev/pci/drm/drm_modes.c:        /* width of hsync as % of total line */
dev/pci/drm/ttm/ttm_device.c:   /* Limit the number of pages in the pool to 
about 50% of the total
dev/pci/drm/i915/i915_drv.h:    return p % pbits + INTEL_SUBPLATFORM_BITS;
dev/pci/drm/i915/i915_irq.c:    scanline = (scanline + vblank_start) % vtotal;
dev/pci/drm/i915/i915_irq.c:    return (position + crtc->scanline_offset) % 
vtotal;
dev/pci/drm/i915/i915_irq.c:            position = (position + htotal - 
hsync_start) % vtotal;
dev/pci/drm/i915/i915_perf.c:                     head > OA_BUFFER_SIZE || head 
% report_size ||
dev/pci/drm/i915/i915_perf.c:                     tail > OA_BUFFER_SIZE || tail 
% report_size,
dev/pci/drm/i915/i915_perf.c:                     head > OA_BUFFER_SIZE || head 
% report_size ||
dev/pci/drm/i915/i915_perf.c:                     tail > OA_BUFFER_SIZE || tail 
% report_size,
dev/pci/drm/i915/i915_pmu.c:     * Strictly speaking the passed in period may 
not be 100% accurate for
dev/pci/drm/i915/i915_reg.h:/* Encoder test pattern 2 - full screen vertical 
75% color bars */
dev/pci/drm/i915/i915_reg.h:/* Encoder test pattern 3 - full screen horizontal 
75% color bars */
dev/pci/drm/i915/i915_reg.h: * This test mode forces the DACs to 50% of full 
output.
dev/pci/drm/i915/display/i9xx_plane.c:   * never allowed to exceed 80% of 
cdclk. Let's just go
dev/pci/drm/i915/display/i9xx_plane.c:   * Not 100% correct for planes that can 
move between pipes,
dev/pci/drm/i915/display/icl_dsi.c:                 vdsc_cfg->pic_width % 
vdsc_cfg->slice_width);
dev/pci/drm/i915/display/icl_dsi.c:                 vdsc_cfg->pic_height % 
vdsc_cfg->slice_height);
dev/pci/drm/i915/display/intel_cdclk.c: /* pixel rate mustn't exceed 95% of 
cdclk with IPS on BDW */
dev/pci/drm/i915/display/intel_cursor.c:         * Not 100% correct for planes 
that can move between pipes,
dev/pci/drm/i915/display/intel_display.c:                * is > 90% of the 
(display) core speed.
dev/pci/drm/i915/display/intel_display.c:        * is 2.5%; use 5% for safety's 
sake.
dev/pci/drm/i915/display/intel_display.c:        * not 100% correct to do this 
here. But SKL+ calculate
dev/pci/drm/i915/display/intel_dp.c:             * 97.71% data bandwidth 
efficiency. Consider max_link_rate the
dev/pci/drm/i915/display/intel_dp.c:     * 80% data bandwidth efficiency for 
SST non-FEC. However, this turns
dev/pci/drm/i915/display/intel_dp_aux_backlight.c:              /* Assume 100% 
brightness if backlight controls aren't enabled yet */
dev/pci/drm/i915/display/intel_dpll_mgr.c:               * code only cares 
about 5% accuracy, and spread is a max of
dev/pci/drm/i915/display/intel_dpll_mgr.c:               * 0.5% downspread.
dev/pci/drm/i915/display/intel_fb.c:    *x += tiles % pitch_tiles * tile_width;
dev/pci/drm/i915/display/intel_fb.c:                    *y = (offset % 
alignment) / pitch;
dev/pci/drm/i915/display/intel_fb.c:                    *x = ((offset % 
alignment) - *y * pitch) / cpp;
dev/pci/drm/i915/display/intel_fb.c:    if (alignment != 0 && 
fb->offsets[color_plane] % alignment) {
dev/pci/drm/i915/display/intel_fb.c:    ccs_x = (x * hsub) % tile_width;
dev/pci/drm/i915/display/intel_fb.c:    ccs_y = (y * vsub) % tile_height;
dev/pci/drm/i915/display/intel_fb.c:    main_x = 
intel_fb->normal_view.color_plane[main_plane].x % tile_width;
dev/pci/drm/i915/display/intel_fb.c:    main_y = 
intel_fb->normal_view.color_plane[main_plane].y % tile_height;
dev/pci/drm/i915/display/intel_hdmi.c:  return mode->crtc_hdisplay % 
pixels_per_group == 0 &&
dev/pci/drm/i915/display/intel_hdmi.c:          mode->crtc_htotal % 
pixels_per_group == 0 &&
dev/pci/drm/i915/display/intel_hdmi.c:          mode->crtc_hblank_start % 
pixels_per_group == 0 &&
dev/pci/drm/i915/display/intel_hdmi.c:          mode->crtc_hblank_end % 
pixels_per_group == 0 &&
dev/pci/drm/i915/display/intel_hdmi.c:          mode->crtc_hsync_start % 
pixels_per_group == 0 &&
dev/pci/drm/i915/display/intel_hdmi.c:          mode->crtc_hsync_end % 
pixels_per_group == 0 &&
dev/pci/drm/i915/display/intel_hdmi.c:           mode->crtc_htotal/2 % 
pixels_per_group == 0);
dev/pci/drm/i915/display/intel_hdmi.c:          if (vactive % slice_height == 0)
dev/pci/drm/i915/display/intel_overlay.c:               if (rec->offset_Y % 
depth)
dev/pci/drm/i915/display/intel_overlay.c:       if (rec->src_width % uv_hscale)
dev/pci/drm/i915/display/intel_psr.c:   if (crtc_hdisplay % 
intel_dp->psr.su_w_granularity)
dev/pci/drm/i915/display/intel_psr.c:   if (crtc_vdisplay % 
intel_dp->psr.su_y_granularity)
dev/pci/drm/i915/display/intel_psr.c:   if (y_granularity == 0 || crtc_vdisplay 
% y_granularity)
dev/pci/drm/i915/display/intel_psr.c:   pipe_clip->y1 -= pipe_clip->y1 % 
y_alignment;
dev/pci/drm/i915/display/intel_psr.c:   if (pipe_clip->y2 % y_alignment)
dev/pci/drm/i915/display/intel_sprite.c:        if (src_x % hsub || src_w % 
hsub) {
dev/pci/drm/i915/display/intel_sprite.c:        if (src_y % vsub || src_h % 
vsub) {
dev/pci/drm/i915/display/intel_sprite.c:        /* Starting limit is 90% of 
cdclk */
dev/pci/drm/i915/display/intel_sprite.c:        /* -10% per decimation step */
dev/pci/drm/i915/display/intel_sprite.c:        /* -10% for RGB */
dev/pci/drm/i915/display/intel_sprite.c:         * We should also do -10% if 
sprite scaling is enabled
dev/pci/drm/i915/display/skl_universal_plane.c:         aux_x = x * hsub + 
aux_x % hsub;
dev/pci/drm/i915/display/skl_universal_plane.c:         aux_y = y * vsub + 
aux_y % vsub;
dev/pci/drm/i915/display/skl_universal_plane.c:         
plane_state->view.color_plane[ccs_plane].x = (x * hsub + src_x % hsub) / 
main_hsub;
dev/pci/drm/i915/display/skl_universal_plane.c:         
plane_state->view.color_plane[ccs_plane].y = (y * vsub + src_y % vsub) / 
main_vsub;
dev/pci/drm/i915/display/vlv_dsi.c:     if (exit_zero_cnt < (55 * ui_den / 
ui_num) && (55 * ui_den) % ui_num)
dev/pci/drm/i915/display/intel_backlight.c:      * 25% of the max.
dev/pci/drm/i915/display/hsw_ips.c:             /* pixel rate mustn't exceed 
95% of cdclk with IPS on BDW */
dev/pci/drm/i915/display/skl_watermark.c:                       if (lines % 
wp->y_min_scanlines == 0)
dev/pci/drm/i915/display/skl_watermark.c:                                       
lines % wp->y_min_scanlines;
dev/pci/drm/i915/display/intel_pch_refclk.c:            p->phaseinc = 
p->desired_divisor % p->iclk_pi_range;
dev/pci/drm/i915/gem/i915_gem_shmem.c:           * cache) for about a 10% 
performance improvement
dev/pci/drm/i915/gem/selftests/huge_pages.c:            ce = 
engines->engines[order[i] % engines->num_engines];
dev/pci/drm/i915/gem/selftests/i915_gem_context.c:                              
this = igt_request_alloc(ctx[n % nctx], engine);
dev/pci/drm/i915/gem/selftests/i915_gem_object.c:                   
i915_gem_object_get_page(obj, n % nreal)) {
dev/pci/drm/i915/gem/selftests/i915_gem_object.c:                              
n, n % nreal);
dev/pci/drm/i915/intel_pm.c:     * reserved for the sprite plane. It's not 100% 
clear
dev/pci/drm/i915/gt/intel_engine_cs.c:                  idx = ++read % 
num_entries;
dev/pci/drm/i915/gt/selftest_execlists.c:               struct intel_engine_cs 
*engine = siblings[n % nsibling];
dev/pci/drm/i915/gt/selftest_timeline.c:                idx = state->count++ % 
state->max;
dev/pci/drm/i915/gt/selftest_timeline.c:                idx = --state->count % 
state->max;
dev/pci/drm/i915/gt/uc/intel_guc_ct.c:  tail = (tail + 1) % size;
dev/pci/drm/i915/gt/uc/intel_guc_ct.c:  tail = (tail + 1) % size;
dev/pci/drm/i915/gt/uc/intel_guc_ct.c:          tail = (tail + 1) % size;
dev/pci/drm/i915/gt/uc/intel_guc_ct.c:  head = (head + 1) % size;
dev/pci/drm/i915/gt/uc/intel_guc_ct.c:          head = (head + 1) % size;
dev/pci/drm/i915/gt/uc/intel_guc_capture.c:     if (i % sizeof(u32)) {
dev/pci/drm/i915/gvt/cmd_parser.c:                      workload->rb_head) % 
guest_rb_size;
dev/pci/drm/i915/selftests/i915_request.c:                              
t->contexts[order[n] % t->ncontexts];
dev/pci/drm/i915/selftests/intel_memory_region.c:               ce = 
engines->engines[order[i] % engines->num_engines];
dev/pci/drm/i915/selftests/intel_memory_region.c:               size = 
bytes[order[i] % count];
dev/pci/drm/i915/selftests/intel_memory_region.c:               align = 
bytes[order[i] % count];
dev/pci/drm/include/drm/drm_connector.h:         * Add margins to the mode 
calculation (1.8% of xres rounded
dev/pci/drm/include/drm/drm_connector.h:         * down to 8 pixels and 1.8% of 
yres).
dev/pci/drm/include/drm/drm_crtc.h:      * only differ in less than 1% of the 
refresh rate). The active width
dev/pci/drm/include/linux/gcd.h:        c = a % b;
dev/pci/drm/include/linux/gcd.h:                c = a % b;
dev/pci/drm/include/linux/math64.h:     *rem = x % y;
dev/pci/drm/include/linux/math64.h:     *rem = x % y;
dev/pci/drm/radeon/atom.c:              ctx->ctx->divmul[1] = dst % src;
dev/pci/drm/radeon/atombios.h:  USHORT              usSpreadSpectrumPercentage; 
        //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
dev/pci/drm/radeon/pptable.h:    USHORT  usPWMMin;                        // 
The minimum PWM value in percent (0.01% increments).
dev/pci/drm/radeon/radeon_display.c:     * Note that this method of completion 
handling is still not 100% race
dev/pci/drm/radeon/radeon_legacy_tv.c:  tv_dac->tv.hrestart = restart % h_total;
dev/pci/drm/radeon/radeon_legacy_tv.c:  tv_dac->tv.vrestart = restart % v_total;
dev/pci/drm/radeon/radeon_legacy_tv.c:  tv_dac->tv.frestart = restart % f_total;
dev/pci/drm/radeon/radeon_object.c:      * VRAM usage can change a lot, so 
playing safe even at 50% does
dev/pci/drm/radeon/radeon_sa.c: wasted = (align - (soffset % align)) % align;
dev/pci/drm/radeon/radeon_sa.c: wasted = (align - (soffset % align)) % align;

*not going to patch drm; that code comes from Linux, so it's upstream's job?

dev/pci/cz.c:           put = ((put + move) % size);
dev/pci/cz.c:           get = (get + 1) % size;

--- cz.c        Fri Mar 11 19:00:45 2022
+++ /tmp/tfile  Sun Jul 30 11:43:53 2023
@@ -1578,6 +1578,10 @@
                }
 #endif
 
+               /* FPE avoidance */
+               if (size == 0)
+                       return -1;
+
                put = ((put + move) % size);
                done = 1;
        }
@@ -1611,6 +1615,10 @@
                }
 #endif
                (*linesw[tp->t_line].l_rint)(ch, tp);
+               /* FPE avoidance */
+               if (size == 0)
+                       return -1;
+
                get = (get + 1) % size;
                done = 1;
        }

dev/pci/eap.c:          while (((EREAD4(sc, EAP_ADC_SIZE) >> 16) + 8) % nw == 0) {


*safe (dead code)

dev/pci/emuxki.c:               if (bufsize % blksize)    /* This should not happen */

--- emuxki.c    Tue Apr 11 06:30:39 2023
+++ /tmp/tfile  Sun Jul 30 11:51:04 2023
@@ -1649,6 +1649,10 @@
        int idx;
        int error = EFAULT;
 
+       /* FPE avoidance */
+       if (blksize == 0)
+               return (error);
+
        LIST_FOREACH(mem, &voice->sc->mem, next) {
                if (KERNADDR(mem->dmamem) != ptr)
                        continue;


dev/pci/envy.c: if (bufsz % blksz) {
dev/pci/envy.c: if (bufsz % blksz != 0) {

*safe except in ENVY_DEBUG mode

dev/pci/esa.c:                                              % play_bufsize;
dev/pci/esa.c:                                              % play_bufsize;
dev/pci/esa.c:                                              % rec_bufsize;
dev/pci/esa.c:                                              % rec_bufsize;

*safe


dev/pci/if_bgereg.h:#define     BGE_INC(x, y)   (x) = (x + 1) % y
dev/pci/if_bgereg.h:    (BGE_JRAWLEN % sizeof(u_int64_t))))

--- if_bge.c    Tue Apr 11 06:30:39 2023
+++ /tmp/tfile  Sun Jul 30 12:00:34 2023
@@ -3506,6 +3506,11 @@
                cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
 
                rxidx = cur_rx->bge_idx;
+
+               /* avoid FPE */
+               if (sc->bge_return_ring_cnt == 0)
+                       continue;
+       
                BGE_INC(rx_cons, sc->bge_return_ring_cnt);
 
                if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {


dev/pci/if_bnx.c:                   (offset % sc->bnx_flash_info->page_size);
dev/pci/if_bnx.c:                   (offset % sc->bnx_flash_info->page_size);
dev/pci/if_bnx.c:               page_start -= (page_start % sc->bnx_flash_info->page_size);

*safe

dev/pci/if_bnxt.c:                      bq = &sc->sc_queues[i % sc->sc_nqueues];
dev/pci/if_bnxt.c:          (cpr->commit_cons+1) % cpr->ring.ring_size, 1);
dev/pci/if_bnxt.c:          (cpr->commit_cons+1) % cpr->ring.ring_size, 1);

*safe

dev/pci/if_bwfm_pci.c:  flowid = flowid % sc->sc_max_flowrings;
dev/pci/if_bwfm_pci.c:          flowid = (flowid + 1) % sc->sc_max_flowrings;
dev/pci/if_bwfm_pci.c:  flowid = flowid % sc->sc_max_flowrings;
dev/pci/if_bwfm_pci.c:          flowid = (flowid + 1) % sc->sc_max_flowrings;

--- if_bwfm_pci.c       Tue Apr 11 06:30:39 2023
+++ /tmp/tfile  Sun Jul 30 12:20:45 2023
@@ -609,6 +609,12 @@
                sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
        }
 
+       if (sc->sc_max_flowrings == 0) {
+               printf("%s: avoiding FPE condition\n", 
+                   DEVNAME(sc));
+               return 1;
+       }
+
        if (sc->sc_dma_idx_sz == 0) {
                d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
                d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);


dev/pci/if_em_hw.c:                     if ((((offset + widx) * 2) % eeprom->page_size) == 0) {

*safe, but not if struct em_hw is zeroed out and page_size is thus 0.
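
If that ever needs hardening, the same guard idiom would apply; a sketch only,
reusing the identifiers from the grep hit above (hypothetical, not a tested
patch against if_em_hw.c):

	/* hypothetical guard; page_size is 0 if the eeprom info was never filled in */
	if (eeprom->page_size != 0 &&
	    (((offset + widx) * 2) % eeprom->page_size) == 0) {
		/* existing page-boundary handling stays here */
	}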

dev/pci/if_igc.c:                   (rxr->last_desc_filled + 1) % sc->num_rx_desc);
dev/pci/if_igc.c:                   (rxr->last_desc_filled + 1) % sc->num_rx_desc);
dev/pci/if_igc.c:               queue_id = (i % sc->sc_nqueues);

--- if_igc.c    Tue Apr 11 06:30:40 2023
+++ /tmp/tfile  Sun Jul 30 12:40:17 2023
@@ -2359,6 +2359,10 @@
        /* Warning FM follows */
        reta = 0;
        for (i = 0; i < 128; i++) {
+               /* FPE avoidance */
+               if (sc->sc_nqueues == 0)
+                       continue;
+
                queue_id = (i % sc->sc_nqueues);
                /* Adjust if required */
                queue_id = queue_id << shift;


dev/pci/if_iwm.c:                       if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
dev/pci/if_iwm.c:               index = (buf->head_sn + i) % buf->buf_size;
dev/pci/if_iwm.c:               int index = ssn % reorder_buf->buf_size;
dev/pci/if_iwm.c:       index = sn % buffer->buf_size;
dev/pci/if_iwm.c:               quota_rem = IWM_MAX_QUOTA % num_active_macs;
dev/pci/if_iwm.c:#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);

--- if_iwm.c    Tue Apr 11 06:30:40 2023
+++ /tmp/tfile  Sun Jul 30 12:49:42 2023
@@ -5044,7 +5044,8 @@
        if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
                goto set_timer;
 
-       while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+       while (reorder_buf->buf_size &&
+               iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
                int index = ssn % reorder_buf->buf_size;
                struct mbuf *m;
                int chanidx, is_shortpre;
@@ -5278,7 +5279,10 @@
                return 0;
        }
 
-       index = sn % buffer->buf_size;
+       if (buffer->buf_size != 0)
+               index = sn % buffer->buf_size;
+       else
+               index = 0;
 
        /*
         * Check if we already stored this frame



dev/pci/if_iwn.c:       mod = letoh64(cmd.tstamp) % val;
dev/pci/if_iwn.c: * Limit the total dwell time to 85% of the beacon interval.
dev/pci/if_iwn.c:                   ((100 % bintval) * 1024));
dev/pci/if_iwn.c:                       if (len % sizeof(uint32_t))

* careful here! ni_intval is read from the wire and influences val and bintval;
  if it is 0 (specially crafted frame?), would an iwn-driven device FPE?
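
A point-of-use guard in if_iwn.c would look roughly like this (sketch only,
reusing the identifiers from the grep hit above; the diff below instead clamps
bintval, and the net80211 changes further down clamp at the source):

	/* hypothetical guard; val is derived from ni_intval */
	if (val != 0)
		mod = letoh64(cmd.tstamp) % val;
	else
		mod = 0;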

--- if_iwn.c    Fri Oct 21 07:20:51 2022
+++ /tmp/tfile  Sun Jul 30 13:42:25 2023
@@ -5180,7 +5180,8 @@
                hdr->max_out = htole32(200 * 1024);
 
                /* Configure scan pauses which service on-channel traffic. */
-               bintval = ic->ic_bss->ni_intval ? ic->ic_bss->ni_intval : 100;
+               bintval = (ic->ic_bss->ni_intval > 100) ? \
+                               ic->ic_bss->ni_intval : 100;
                hdr->pause_scan = htole32(((100 / bintval) << 22) |
                    ((100 % bintval) * 1024));
        }


* also...

--- /sys/net80211/ieee80211_input.c     Tue Apr 11 06:32:40 2023
+++ /tmp/input.c        Sun Jul 30 13:35:42 2023
@@ -2012,7 +2012,7 @@
                ni->ni_rssi = rxi->rxi_rssi;
        ni->ni_rstamp = rxi->rxi_tstamp;
        memcpy(ni->ni_tstamp, tstamp, sizeof(ni->ni_tstamp));
-       ni->ni_intval = bintval;
+       ni->ni_intval = bintval ? bintval : 1;
        ni->ni_capinfo = capinfo;
        ni->ni_erp = erp;
        /* NB: must be after ni_chan is setup */
@@ -2506,7 +2506,7 @@
 
        ni->ni_rssi = rxi->rxi_rssi;
        ni->ni_rstamp = rxi->rxi_tstamp;
-       ni->ni_intval = bintval;
+       ni->ni_intval = bintval ? bintval : 1;
        ni->ni_capinfo = capinfo;
        ni->ni_chan = ic->ic_bss->ni_chan;
        if (htcaps)
--- /sys/net80211/ieee80211_ioctl.c     Mon Mar  7 09:13:13 2022
+++ /tmp/ioctl.c        Sun Jul 30 13:32:58 2023
@@ -97,7 +97,7 @@
        }
        nr->nr_max_rssi = ic->ic_max_rssi;
        bcopy(ni->ni_tstamp, nr->nr_tstamp, sizeof(nr->nr_tstamp));
-       nr->nr_intval = ni->ni_intval;
+       nr->nr_intval = ni->ni_intval ? ni->ni_intval : 1;
        nr->nr_capinfo = ni->ni_capinfo;
        nr->nr_erp = ni->ni_erp;
        nr->nr_pwrsave = ni->ni_pwrsave;
@@ -166,7 +166,7 @@
        bcopy(nr->nr_rates, ni->ni_rates.rs_rates, IEEE80211_RATE_MAXSIZE);
 
        /* Node information */
-       ni->ni_intval = nr->nr_intval;
+       ni->ni_intval = nr->nr_intval ? nr->nr_intval : 1;
        ni->ni_capinfo = nr->nr_capinfo;
        ni->ni_erp = nr->nr_erp;
        ni->ni_pwrsave = nr->nr_pwrsave;
--- /sys/net80211/ieee80211_node.c      Sun Mar 20 08:50:32 2022
+++ /tmp/node.c Sun Jul 30 13:34:33 2023
@@ -958,7 +958,7 @@
        ni->ni_rssi = 0;
        ni->ni_rstamp = 0;
        memset(ni->ni_tstamp, 0, sizeof(ni->ni_tstamp));
-       ni->ni_intval = ic->ic_lintval;
+       ni->ni_intval = ic->ic_lintval ? ic->ic_lintval : 1;
        ni->ni_capinfo = IEEE80211_CAPINFO_IBSS;
        if (ic->ic_flags & IEEE80211_F_WEPON)
                ni->ni_capinfo |= IEEE80211_CAPINFO_PRIVACY;



dev/pci/if_iwx.c:                       if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
dev/pci/if_iwx.c:               index = (buf->head_sn + i) % buf->buf_size;
dev/pci/if_iwx.c:               int index = ssn % reorder_buf->buf_size;
dev/pci/if_iwx.c:       index = sn % buffer->buf_size;
dev/pci/if_iwx.c:               ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
dev/pci/if_iwx.c:       ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
dev/pci/if_iwx.c:       ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
dev/pci/if_iwx.c:       sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);


--- if_iwx.c    Tue Apr 11 06:30:40 2023
+++ /tmp/tfile  Sun Jul 30 13:56:35 2023
@@ -4834,7 +4834,8 @@
        if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
                goto set_timer;
 
-       while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+       while (reorder_buf->buf_size &&
+               iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
                int index = ssn % reorder_buf->buf_size;
                struct mbuf *m;
                int chanidx, is_shortpre;
@@ -5068,7 +5069,10 @@
                return 0;
        }
 
-       index = sn % buffer->buf_size;
+       if (buffer->buf_size != 0)
+               index = sn % buffer->buf_size;
+       else
+               index = 0;
 
        /*
         * Check if we already stored this frame
@@ -8468,7 +8472,8 @@
                return err;
        }
 
-       if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
+       if (sc->setkey_nkeys >= nitems(sc->setkey_arg) ||
+               nitems(sc->setkey_arg) == 0)
                return ENOSPC;
 
        a = &sc->setkey_arg[sc->setkey_cur];





dev/pci/if_ixl.c:               lute[i] = i % nqueues;

--- if_ixl.c    Tue Apr 11 06:30:40 2023
+++ /tmp/tfile  Sun Jul 30 13:59:39 2023
@@ -2215,7 +2215,7 @@
        stoeplitz_to_key(&rsskey, sizeof(rsskey));
 
        nqueues = ixl_nqueues(sc);
-       for (i = 0; i < sizeof(lut); i++) {
+       for (i = 0; nqueues && i < sizeof(lut); i++) {
                /*
                 * ixl must have a power of 2 rings, so using mod
                 * to populate the table is fine.


dev/pci/if_lgereg.h:#define LGE_INC(x, y)               (x) = (x + 1) % y
dev/pci/if_lgereg.h:    (LGE_JRAWLEN % sizeof(u_int64_t))))

*safe


ANOTHERBREAKPLEASE

