This is an automated email from the ASF dual-hosted git repository. mboehm7 pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/systemds.git
commit 6396846748f2e80ade7c04aae498e83b811cac5a Author: Matthias Boehm <[email protected]> AuthorDate: Sun Jan 31 15:30:16 2021 +0100 [SYSTEMDS-2818] Fix read/copy of large dense matrix blocks >16GB The generalization of matrix blocks towards multi-dimensional arrays (tensors) and different value types introduced integer overflows when computing internal offsets during block copies as done during the read of large dense matrices. This patch fixes both the overflows and indexing logic in general. Unfortunately, the issue only showed up with blocks >16GB that are infeasible to include in the unit tests. --- src/main/java/org/apache/sysds/runtime/data/DenseBlock.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/apache/sysds/runtime/data/DenseBlock.java b/src/main/java/org/apache/sysds/runtime/data/DenseBlock.java index 47498f9..489400d 100644 --- a/src/main/java/org/apache/sysds/runtime/data/DenseBlock.java +++ b/src/main/java/org/apache/sysds/runtime/data/DenseBlock.java @@ -459,10 +459,13 @@ public abstract class DenseBlock implements Serializable if (db.isNumeric()) { int rowOther = 0; int colOther = 0; - for (int bi = index(rl); bi <= index(ru - 1); bi++) { + for (int bi = index(rl); bi <= index(ru-1); bi++) { + int rpos = bi*blockSize(); + int rposl = Math.max(rl-rpos, 0); if (allColumns) { - int offset = rl * _odims[0] + cl; - for (int i = 0; i < (ru - rl) * _odims[0]; i++) { + int offset = rposl * _odims[0] + cl; + int rlen = Math.min(ru-Math.max(rpos,rl), blockSize(bi)-rposl); + for (int i = 0; i < rlen * _odims[0]; i++) { setInternal(bi, offset + i, db.get(rowOther, colOther)); colOther++; if (colOther == db.getCumODims(0)) {
