This is an automated email from the ASF dual-hosted git repository.

ssiddiqi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/systemds.git


The following commit(s) were added to refs/heads/main by this push:
     new 69231e1c72 [SYSTEMDS-3166] Implemented builtin anomaly detection via Isolation Forest The commit includes the builtin implementation as well as unit and functionality tests in dml, both of which are located in the staging area.
69231e1c72 is described below

commit 69231e1c72ae85fdbe1ee2f6955854cace0517f1
Author: Sigmaeon <[email protected]>
AuthorDate: Wed Jan 10 14:40:53 2024 +0100

    [SYSTEMDS-3166] Implemented builtin anomaly detection via Isolation Forest
    The commit includes the builtin implementation as well as unit and functionality tests in dml, both of which are located in the staging area.
    
    Closes #1980.
---
 .../staging/isolationForest/isolationForest.dml    | 625 ++++++++++++++++++
 .../isolationForest/test/isolationForestTest.dml   | 707 +++++++++++++++++++++
 2 files changed, 1332 insertions(+)

diff --git a/scripts/staging/isolationForest/isolationForest.dml b/scripts/staging/isolationForest/isolationForest.dml
new file mode 100644
index 0000000000..0f71cef3c0
--- /dev/null
+++ b/scripts/staging/isolationForest/isolationForest.dml
@@ -0,0 +1,625 @@
+# ---------------------------------------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ---------------------------------------------------------------------------------------------
+
+#==============================================================================================
+# THIS SCRIPT IMPLEMENTS ANOMALY DETECTION VIA ISOLATION FOREST AS DESCRIBED IN
+# [Liu2008]:
+#   Liu, F. T., Ting, K. M., & Zhou, Z. H.
+#   (2008, December).
+#   Isolation forest.
+#   In 2008 Eighth IEEE International Conference on Data Mining (pp. 413-422).
+#   IEEE.
+#==============================================================================================
+
+
+# This function creates an iForest model as described in [Liu2008]
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME              TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X                 Matrix[Double]                Numerical feature matrix
+# n_trees           Int                           Number of iTrees to build
+# subsampling_size  Int                           Size of the subsample to build iTrees with
+# seed              Int              -1           Seed for calls to `sample` and `rand`.
+#                                                 -1 corresponds to a random seed
+# ---------------------------------------------------------------------------------------------
+# OUTPUT:
+# iForestModel  The trained iForest model to be used in outlierByIsolationForestApply.
+#               The model is represented as a list with two entries:
+#               Entry 'model' (Matrix[Double]) - The iForest model in linearized form (see m_iForest)
+#               Entry 'subsampling_size' (Double) - The subsampling size used to build the model.
+# ---------------------------------------------------------------------------------------------
+outlierByIsolationForest = function(Matrix[Double] X, Int n_trees, Int subsampling_size, Int seed = -1)
+  return(List[Unknown] iForestModel)
+{
+  M = m_iForest(X, n_trees, subsampling_size, seed)
+  iForestModel = list(model=M, subsampling_size=subsampling_size)
+}
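+
+# A minimal usage sketch (the data shape, n_trees=100, subsampling_size=64, and the 0.5
+# threshold below are illustrative assumptions, not fixed defaults of this builtin):
+#
+#   X = rand(rows=256, cols=4, seed=7)
+#   model = outlierByIsolationForest(X=X, n_trees=100, subsampling_size=64, seed=42)
+#   scores = outlierByIsolationForestApply(iForestModel=model, X=X)
+#   is_outlier = scores > 0.5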
+
+# Calculates the anomaly score as described in [Liu2008] for a set of samples `X` based
+# on an iForest model.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME              TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# iForestModel      List[Unknown]                 The trained iForest model as returned by
+#                                                 outlierByIsolationForest
+# X                 Matrix[Double]                Samples to calculate the anomaly score for
+# ---------------------------------------------------------------------------------------------
+# OUTPUT:
+# anomaly_scores   Column vector of anomaly scores corresponding to the samples in X.
+#                  Samples with an anomaly score > 0.5 are generally considered to be outliers
+# ---------------------------------------------------------------------------------------------
+outlierByIsolationForestApply = function(List[Unknown] iForestModel, Matrix[Double] X)
+  return(Matrix[Double] anomaly_scores)
+{
+  assert(nrow(X) > 1)
+
+  M = as.matrix(iForestModel['model'])
+  subsampling_size = as.integer(as.scalar(iForestModel['subsampling_size']))
+  assert(subsampling_size > 1)
+
+  height_limit = ceil(log(subsampling_size, 2))  
+  tree_size = 2*(2^(height_limit+1)-1)
+  assert(ncol(M) == tree_size & nrow(M) > 1)
+
+  anomaly_scores = matrix(0, rows=nrow(X), cols=1)
+  parfor (i_x in 1:nrow(X)) {
+    anomaly_scores[i_x, 1] = m_score(M, X[i_x,], subsampling_size)
+  }
+}
+
+# This function implements isolation forest for numerical input features as 
+# described in [Liu2008]. 
+#
+# The returned 'linearized' model is of type Matrix[Double] where each row
+# corresponds to a linearized iTree (see m_iTree). Note that each tree in the
+# model is padded with placeholder nodes such that each iTree has the same maximum depth.
+# 
+# .. code-block::
+#
+#   For example, given a feature matrix with features [a,b,c,d]
+#   and the following iForest, M would look as follows:
+#
+#   Level              Tree 1                  Tree 2        Node Depth
+#   -------------------------------------------------------------------
+#   (L1)               |d<=5|                  |b<=6|           0
+#                     /     \                 /      \  
+#   (L2)             2    |a<=7|             20       0         1 
+#                          /   \
+#   (L3)                  10    8                               2 
+#
+#   --> M :=
+#   [[ 4,  5,  0,  2,  1,  7, -1, -1, -1, -1,  0, 10,  0,  8],  (Tree 1)
+#    [ 2,  6,  0, 20,  0,  0, -1, -1, -1, -1, -1, -1, -1, -1]]  (Tree 2)
+#    | (L1) | |    (L2)     | |            (L3)             | 
+#
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME              TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X                 Matrix[Double]                Numerical feature matrix
+# n_trees           Int                           Number of iTrees to build
+# subsampling_size  Int                           Size of the subsample to build iTrees with
+# seed              Int              -1           Seed for calls to `sample` and `rand`.
+#                                                 -1 corresponds to a random seed
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# M    Matrix containing the learned iForest in linearized form
+# ---------------------------------------------------------------------------------------------
+m_iForest = function(Matrix[Double] X, Int n_trees, Int subsampling_size, Int seed = -1)
+  return(Matrix[Double] M)
+{
+  # check assumptions
+  s_warning_assert(n_trees > 0, "iForest: Requirement n_trees > 0 not satisfied! n_trees: "+toString(n_trees))
+  s_warning_assert(subsampling_size > 1 & subsampling_size <= nrow(X), "iForest: Requirement 1 < subsampling_size <= nrow(X) not satisfied! subsampling_size: "+toString(subsampling_size)+"; nrow(X): "+toString(nrow(X)))
+
+  height_limit = ceil(log(subsampling_size, 2))
+  tree_size = 2*(2^(height_limit+1)-1)
+
+  # initialize the model
+  M = matrix(-1, cols=tree_size, rows=n_trees)
+  seeds = matrix(seq(1, n_trees), cols=n_trees, rows=1)*seed
+
+  parfor (i_iTree in 1:n_trees) {
+    # subsample rows
+    tree_seed = ifelse(seed == -1, -1, as.scalar(seeds[1, i_iTree]))
+    X_subsample = s_sampleRows(X, subsampling_size, tree_seed)
+
+    # Build iTree
+    tree_seed = ifelse(seed == -1, -1, tree_seed+42)
+    M_tree = m_iTree(X_subsample, height_limit, tree_seed)
+
+    # Add iTree to the model
+    M[i_iTree, 1:ncol(M_tree)] = M_tree
+  }
+}
+
+# This function implements isolation trees for numerical input features as 
+# described in [Liu2008].
+#
+# The returned 'linearized' model is of type Matrix[Double] with exactly one row.
+# Here, each node is represented by two consecutive entries in this row vector.
+# Traversing the row vector from left to right corresponds to traversing the tree
+# level-wise from top to bottom and left to right. If a node does not exist
+# (e.g. because the parent node is already a leaf node), the node is still stored
+# using placeholder values.
+# Recall that for a binary tree with maximum depth `d`, the maximum number of nodes
+# can be calculated as `2^(d + 1) - 1`. Hence, for a given maximum depth `d`
+# of an iTree, the row vector will have exactly `2*(2^(d + 1) - 1)` entries.
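+# (A quick worked check of the formulas above: for maximum depth `d` = 3 an iTree has at most
+#  2^4 - 1 = 15 nodes, so its linearized row vector holds 2*15 = 30 entries.)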
+#
+# There are three types of nodes that are represented in this model:
+# - Internal Node
+#  A node that, based on a "split feature" and corresponding "split value",
+#  divides the data into two parts, one of which can potentially be an empty set.
+#  The node is linearized in the following way:
+#    - Entry 1: Represents the index of the splitting feature in the feature matrix `X`
+#    - Entry 2: Represents the splitting value
+#
+# - External Node
+#  A leaf node of the tree. It contains the "size" of the node, that is, the
+#  number of remaining samples after splitting the feature matrix X by traversing
+#  the tree to this node.
+#  The node is linearized in the following way:
+#  - Entry 1: Always 0 - indicating an external node
+#  - Entry 2: The "size" of the node
+#
+# - Placeholder Node
+#  A node that is not present in the actual iTree and is used for "padding".
+#  Both entries are set to -1
+# 
+# .. code-block::
+#
+#   For example, given a feature matrix with features [a,b,c,d]
+#   and the following tree, M would look as follows:
+#   Level              Tree                Node Depth
+#   ------------------------------------------------- 
+#   (L1)               |d<5|                   0
+#                     /     \
+#   (L2)             1    |a<7|                1 
+#                          /   \
+#   (L3)                  10    0              2 
+#
+#   --> M :=
+#   [[4, 5, 0, 1, 1, 7, -1, -1, -1, -1, 0, 10, 0, 0]]
+#    |(L1)| |  (L2)  | |          (L3)            |
+#
+#
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME            TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X               Matrix[Double]                Numerical feature matrix
+# max_depth       Int                           Maximum depth of the learned tree where depth is the
+#                                               maximum number of edges from root to a leaf node
+# seed            Int              -1           Seed for calls to `sample` and `rand`.
+#                                               -1 corresponds to a random seed
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# M    Matrix M containing the learned tree in linearized form
+# ---------------------------------------------------------------------------------------------
+m_iTree = function(Matrix[Double] X, Int max_depth, Int seed = -1)
+  return(Matrix[Double] M) 
+{
+  # check assumptions
+  s_warning_assert(max_depth > 0 & max_depth <= 32, "iTree: Requirement 0 < max_depth <= 32 not satisfied! max_depth: " + max_depth)
+  s_warning_assert(nrow(X) > 0, "iTree: Requirement nrow(X) > 0 not satisfied! Feature matrix X has no rows!")
+
+
+  # Initialize M to largest possible matrix given max_depth
+  # Note that each node takes exactly 2 indices in M and the root node has depth 0
+  M = matrix(-1, rows=1, cols=2*(2^(max_depth+1)-1))
+  
+  # Queue for implementing recursion in the original algorithm.
+  # Each entry in the queue corresponds to a node in the tree to be added to the model
+  # M and, in case of internal nodes, split further.
+  # Nodes in this queue are represented by an ID (first index) and the data corresponding to the node (second index)
+  node_queue = list(list(1, X));
+  # variable tracking the maximum ID in the tree
+  max_id = 1;
+  while (length(node_queue) > 0) {
+    # pop next node from queue for splitting
+    [node_queue, queue_entry] = remove(node_queue, 1);
+    node = as.list(queue_entry);
+    node_id = as.scalar(node[1]);
+    X_node = as.matrix(node[2]);
+
+    max_id = max(max_id, node_id)
+
+    is_external_leaf = s_isExternalINode(X_node, node_id, max_depth)
+    if (is_external_leaf) {
+      # External Node: Add node to model
+      M = s_addExternalINode(X_node, node_id, M)
+    }
+    else {
+      # Internal Node: Draw split criterion, add node to model and queue child nodes
+      seed = ifelse(seed == -1, -1, node_id*seed)
+      [split_feature, split_value] = s_drawSplitPoint(X_node, seed)
+      M = s_addInternalINode(node_id, split_feature, split_value, M)
+      [left_id, X_left, right_id, X_right] = s_splitINode(X_node, node_id, split_feature, split_value)
+      
+      node_queue = append(node_queue, list(left_id, X_left))
+      node_queue = append(node_queue, list(right_id, X_right))
+    }    
+  }
+
+  # Prune the model to the actual tree depth
+  tree_depth = floor(log(max_id, 2))
+  M = M[1, 1:2*(2^(tree_depth+1) - 1)];
+}
+
+
+# Randomly draws a split point, i.e. a feature and corresponding value to split a node by.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X             Matrix[Double]                Numerical feature matrix
+# seed          Int              -1           Seed for calls to `sample` and `rand`.
+#                                             -1 corresponds to a random seed
+#
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# split_feature   Index of the feature used for splitting the node
+# split_value     Feature value used for splitting the node
+# ---------------------------------------------------------------------------------------------
+s_drawSplitPoint = function(Matrix[Double] X, Int seed = -1) 
+  return(Int split_feature, Double split_value)
+{
+  # find a random feature and a value between the min and max values of that feature to split the node by
+  split_feature = as.integer(as.scalar(sample(ncol(X), 1, FALSE, seed)))
+  split_value = as.scalar(rand(
+    rows=1, cols=1,
+    min=min(X[, split_feature]),
+    max=max(X[, split_feature]),
+    seed=seed
+  ))
+}
+
+# Adds an external (leaf) node to the linearized iTree model `M`. In the linearized form,
+# each node is assigned two neighboring indices. For external nodes the value at the first
+# index in M is always set to 0 while the value at the second index is set to the number of
+# rows in the feature matrix corresponding to the node.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X_node        Matrix[Double]                Numerical feature matrix corresponding to the node
+# node_id       Int                           ID of the node
+# M             Matrix[Double]                Linearized model to add the node to
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# M   The updated model
+# ---------------------------------------------------------------------------------------------
+s_addExternalINode = function(Matrix[Double] X_node, Int node_id, Matrix[Double] M)
+  return(Matrix[Double] M)
+{
+  s_warning_assert(node_id > 0, "s_addExternalINode: Requirement `node_id > 0` not satisfied!")
+  
+  node_start_index = 2*node_id-1
+  M[, node_start_index] = 0
+  M[, node_start_index + 1] = nrow(X_node)
+}
+
+# Adds an internal node to the linearized iTree model `M`. In the linearized form,
+# each node is assigned two neighboring indices. For internal nodes the value at the first
+# index in M is set to the index of the feature to split by while the value at the second index
+# is set to the value to split the node by.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME           TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# node_id        Int                           ID of the node
+# split_feature  Int                           Index of the feature to split the node by
+# split_value    Double                        Value to split the node by
+# M              Matrix[Double]                Linearized model to add the node to
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# M   The updated model
+# ---------------------------------------------------------------------------------------------
+s_addInternalINode = function(Int node_id, Int split_feature, Double split_value, Matrix[Double] M)
+  return(Matrix[Double] M)
+{
+  s_warning_assert(node_id > 0, "s_addInternalINode: Requirement `node_id > 0` not satisfied!")
+  s_warning_assert(split_feature > 0, "s_addInternalINode: Requirement `split_feature > 0` not satisfied!")
+
+  node_start_index = 2*node_id-1
+  M[, node_start_index] = split_feature
+  M[, node_start_index + 1] = split_value
+}
+
+# This function determines if an iTree node is an external node based on its node_id and the data corresponding to the node
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME            TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X_node          Matrix[Double]                Numerical feature matrix corresponding to the node
+# node_id         Int                           ID belonging to the node
+# max_depth       Int                           Maximum depth of the learned tree where depth is the
+#                                               maximum number of edges from root to a leaf node
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# isExternalNode   true if the node is an external (leaf) node, false otherwise.
+#                  This is the case when the max depth is reached or the number of rows
+#                  in the feature matrix corresponding to the node is <= 1
+# ---------------------------------------------------------------------------------------------
+s_isExternalINode = function(Matrix[Double] X_node, Int node_id, Int max_depth)
+  return(Boolean isExternalNode)
+{
+  s_warning_assert(max_depth > 0, "s_isExternalINode: Requirement `max_depth > 0` not satisfied!")
+  s_warning_assert(node_id > 0, "s_isExternalINode: Requirement `node_id > 0` not satisfied!")
+
+  node_depth = floor(log(node_id, 2))
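+  # e.g. node_id = 5 lies on depth floor(log2(5)) = 2, so with max_depth = 2 it is already a leaf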
+  isExternalNode = node_depth >= max_depth | nrow(X_node) <= 1
+}
+
+
+# This function splits a node based on a given feature and value and returns the sub-matrices
+# and IDs corresponding to the nodes resulting from the split.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X_node        Matrix[Double]                Numerical feature matrix corresponding to the node
+# node_id       Int                           ID of the node to split
+# split_feature Int                           Index of the feature to split the input matrix by
+# split_value   Double                        Value of the feature to split the input matrix by
+#
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# left_id    ID of the resulting left node
+# X_left     Matrix corresponding to the left node resulting from the split with rows where
+#            value for feature `split_feature` <= value `split_value`
+# right_id   ID of the resulting right node
+# X_right    Matrix corresponding to the right node resulting from the split with rows where
+#            value for feature `split_feature` > value `split_value`
+# ---------------------------------------------------------------------------------------------
+s_splitINode = function(Matrix[Double] X_node, Int node_id, Int split_feature, Double split_value)
+  return(Int left_id, Matrix[Double] X_left, Int right_id, Matrix[Double] X_right)
+{
+  s_warning_assert(nrow(X_node) > 0, "s_splitINode: Requirement `nrow(X_node) > 0` not satisfied!")
+  s_warning_assert(node_id > 0, "s_splitINode: Requirement `node_id > 0` not satisfied!")
+  s_warning_assert(split_feature > 0, "s_splitINode: Requirement `split_feature > 0` not satisfied!")
+
+  left_rows_mask = X_node[, split_feature] <= split_value 
+
+  # In the linearized form of the iTree model, nodes need to be ordered by depth.
+  # Since iTrees are binary trees we can use 2*node_id / 2*node_id+1 for left/right child IDs
+  # to ensure that IDs are chosen accordingly.
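+  # (e.g. splitting node_id = 3 yields children 6 and 7, which occupy entries 11-12 and 13-14 of M)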
+  left_id = 2 * node_id
+  X_left = removeEmpty(target=X_node, margin="rows", select=left_rows_mask, empty.return=FALSE)
+
+  right_id = 2 * node_id + 1
+  X_right = removeEmpty(target=X_node, margin="rows", select=!left_rows_mask, empty.return=FALSE)
+}
+
+# Randomly samples `size` rows from a matrix X
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# X             Matrix[Double]                Matrix to sample rows from
+# size          Int                           Number of rows to sample
+# seed          Int              -1           Seed for the call to `rand`.
+#                                             -1 corresponds to a random seed
+#
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# X_extracted   Matrix with the sampled rows from X
+# ---------------------------------------------------------------------------------------------
+s_sampleRows = function(Matrix[Double] X, Int size, Int seed = -1)
+  return(Matrix[Double] X_extracted)
+{
+  s_warning_assert(size > 0 & nrow(X) >= size, "s_sampleRows: Requirements `size > 0 & nrow(X) >= size` not satisfied")
+  random_vector = rand(rows=nrow(X), cols=1, seed=seed)
+  X_rand = cbind(X, random_vector)
+
+  # order by the random vector and return the first `size` rows
+  X_rand = order(target=X_rand, by=ncol(X_rand))
+  X_extracted = X_rand[1:size, 1:ncol(X)]
+}
+
+# Calculates the PathLength as defined in [Liu2008] based on a sample x
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# M             Matrix[Double]                The linearized iTree model
+# x             Matrix[Double]                The sample to calculate the PathLength for
+#
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# PathLength  The PathLength for the sample
+# ---------------------------------------------------------------------------------------------
+m_PathLength = function(Matrix[Double] M, Matrix[Double] x)
+  return(Double PathLength)
+{
+  [nrEdgesTraversed, externalNodeSize] = s_traverseITree(M, x)
+  
+  if (externalNodeSize <= 1) {
+    PathLength = nrEdgesTraversed   
+  }
+  else {
+    PathLength = nrEdgesTraversed + s_cn(externalNodeSize)
+  }
+}
+
+
+# Traverses an iTree based on a sample x
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# M             Matrix[Double]                The linearized iTree model to traverse
+# x             Matrix[Double]                The sample to traverse the iTree with
+#
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# nrEdgesTraversed   The number of edges traversed until an external node was reached
+# externalNodeSize   The size of the external node assigned during training
+# ---------------------------------------------------------------------------------------------
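+# (For instance, with the linearized tree from the m_iTree example above and a sample whose value
+#  for feature d is 3, the root test sends the traversal left into the external node of size 1,
+#  so nrEdgesTraversed = 1 and externalNodeSize = 1.)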
+s_traverseITree = function(Matrix[Double] M, Matrix[Double] x)
+  return(Int nrEdgesTraversed, Int externalNodeSize)
+{
+  s_warning_assert(nrow(x) == 1, "s_traverseITree: Requirement `nrow(x) == 1` not satisfied!")
+
+  nrEdgesTraversed = 0
+  is_external_node = FALSE
+  node_id = 1
+  while (!is_external_node)
+  {
+    node_start_idx = (node_id*2) - 1
+    split_feature = as.integer(as.scalar(M[1,node_start_idx]))
+    node_value = as.scalar(M[1,node_start_idx + 1])
+
+    if (split_feature > 0) {
+      # internal node - node_value = split_value
+      nrEdgesTraversed = nrEdgesTraversed + 1
+      x_val = as.scalar(x[1, split_feature])
+      if (x_val <= node_value) {
+        # go down left
+        node_id = (node_id * 2)
+      }
+      else {
+        # go down right
+        node_id = (node_id * 2) + 1
+      }
+    }
+    else if (split_feature == 0) {
+      # External node - node_value = node size
+      externalNodeSize = as.integer(node_value)
+      is_external_node = TRUE
+    }
+    else {
+      s_warning_assert(FALSE, "iTree is not valid!")
+    }
+  } 
+}
+
+
+# This function gives the average path length of unsuccessful search in a BST, `c(n)`,
+# for `n` nodes as given in [Liu2008]. This function is used to normalize the path length.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# n             Int                           Number of samples corresponding to the external
+#                                             node for which c(n) should be calculated
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# cn   Value for c(n)
+# ---------------------------------------------------------------------------------------------
+s_cn = function(Int n)
+  return(Double cn)
+{
+  s_warning_assert(n > 1, "s_cn: Requirement `n > 1` not satisfied!")
+  
+  # Calculate H(n-1)
+  # The approximation of the Harmonic Number H using `log(n) + eulergamma` has a higher error
+  # for low n. We hence calculate it directly for the first 1000 values.
+  # TODO: Discuss a good value for n --> use e.g. HarmonicNumber(1000) - (ln(1000) + 0.5772156649) in WA
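+  # A quick sanity check of the closed form c(n) = 2*H(n-1) - 2*(n-1)/n used below:
+  # c(2) = 2*H(1) - 2*(1/2) = 2 - 1 = 1, and c(8) = 2*H(7) - 2*(7/8) ~ 3.4357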
+  if (n < 1000) {
+    H_nminus1 = 0
+    for (i in 1:(n-1))
+      H_nminus1 = H_nminus1 + 1/i;
+  }
+  else{
+    # Euler–Mascheroni constant
+    eulergamma = 0.57721566490153
+    # Approximation harmonic number H(n - 1)
+    H_nminus1 = log(n-1) + eulergamma
+  }
+
+  cn = 2*H_nminus1 - 2*(n-1)/n
+}
+
+# Scores a sample `x` according to the score function `s(x, n)` for a sample x and a subsample size n, as described in [Liu2008].
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# M             Matrix[Double]                 iForest model used to score
+# x             Matrix[Double]                 Sample to be scored
+# n             Int                            Subsample size the iTrees were built from
+# ---------------------------------------------------------------------------------------------
+# OUTPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# score   The anomaly score for the sample
+# ---------------------------------------------------------------------------------------------
+m_score = function(Matrix[Double] M, Matrix[Double] x, Int n)
+  return(Double score)
+{
+  s_warning_assert(n > 1, "m_score: Requirement `n > 1` not satisfied!")
+  s_warning_assert(nrow(x) == 1, "m_score: sample has the wrong dimension!")
+  s_warning_assert(nrow(M) > 1, "m_score: invalid iForest Model!")
+
+  h = matrix(0, cols=nrow(M), rows=1)
+  parfor (i_iTree in 1:nrow(M)) {
+    h[1, i_iTree] = m_PathLength(M[i_iTree,], x)
+  }
+
+  score = 2^-(mean(h)/s_cn(n))
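+  # (when mean(h) equals c(n), the score is 2^-1 = 0.5, matching the 0.5 outlier
+  #  threshold mentioned for outlierByIsolationForestApply above)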
+}
+
+# Function that gives a warning if an assertion is violated. This is used instead of `assert` and
+# `stop` since these functions cannot be used in parfor.
+#
+# INPUT PARAMETERS:
+# ---------------------------------------------------------------------------------------------
+# NAME          TYPE             DEFAULT      MEANING
+# ---------------------------------------------------------------------------------------------
+# assertion     Boolean                       Condition that is expected to hold
+# warning       String                        Warning message printed if the assertion is violated
+# ---------------------------------------------------------------------------------------------
+s_warning_assert = function(Boolean assertion, String warning) 
+{
+  if (!assertion)
+    print("WARNING! "+warning)
+}
\ No newline at end of file
diff --git a/scripts/staging/isolationForest/test/isolationForestTest.dml b/scripts/staging/isolationForest/test/isolationForestTest.dml
new file mode 100644
index 0000000000..9decfe6087
--- /dev/null
+++ b/scripts/staging/isolationForest/test/isolationForestTest.dml
@@ -0,0 +1,707 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+source("./scripts/staging/isolationForest/isolationForest.dml") as iForest;
+
+# This script tests the isolationForest implementation in isolationForest.dml.
+# In particular, the functions `outlierByIsolationForest` and `outlierByIsolationForestApply`
+# as well as their sub-routines are tested here.
+# ---------------------------------------------------------------------------------------------
+##TODO: Implement the consistency checks in the main implementation
+
+# U N I T   T E S T S
+# ---------------------------------------------------------------------------------------------
+# ---------------------------------------------------------------------------------------------
+
+# Utility function for printing test results
+record_test_result = function(String testname, Boolean success, Int t_cnt, List[String] fails)
+  return(Int t_cnt, List[String] fails)
+{
+  t_cnt = t_cnt + 1
+
+  if (success) {
+    print("- Test '"+testname+"' was successful!")
+    fails = fails
+  }
+  else {
+    print("- Test '"+testname+"' failed!")
+    fails = append(fails, testname)
+  }
+}
+
+matrices_equal = function(Matrix[Double] m1, Matrix[Double] m2) 
+  return(Boolean equal)
+{
+  if (ncol(m1) == ncol(m2) & nrow(m1) == nrow(m2)) {
+    inequality_mat = abs(m1 - m2) > 1e-14
+    equal = sum(inequality_mat) == 0
+  }
+  else
+    equal = FALSE
+}
+
+is_itree_consistent = function(Matrix[Double] M, Matrix[Double] X, Int max_depth, Boolean is_subsampled_model = FALSE)
+  return(Boolean consistent)
+{
+  consistent = TRUE
+  n_nodes = length(M) / 2
+  tree_depth = floor(log(n_nodes + 1, 2)) - 1
+
+  # check if the model corresponds to a full binary tree of depth tree_depth
+  check_full_tree = n_nodes > 1 & tree_depth == floor(log(n_nodes, 2)) & tree_depth < floor(log(n_nodes + 2, 2))
+  if (!check_full_tree) print("Inconsistency: Model is not a full binary tree!")
+  consistent = consistent & check_full_tree
+
+  # check tree depth
+  check_max_depth = tree_depth <= max_depth
+  if (!check_max_depth) print("Inconsistency: Tree depth exceeds max_depth!")
+  consistent = consistent & check_max_depth
+  
+  # root node has to be a valid internal node
+  root_node_split_feature = as.integer(as.scalar(M[1, 1]))
+  root_node_split_value = as.scalar(M[1, 2])
+  check_first_node = root_node_split_feature > 0 & root_node_split_feature <= ncol(X) &
+    min(X[,root_node_split_feature]) <= root_node_split_value & max(X[,root_node_split_feature]) >= root_node_split_value
+  if (!check_first_node) print("Inconsistency: Root node is not a valid internal node!")
+  consistent = consistent & check_first_node
+
+  sum_external_node_sizes = 0
+  for (node_start_idx in seq(3, length(M), 2)) {
+    node_entry_1 = as.integer(as.scalar(M[1, node_start_idx]))
+    node_entry_2 = as.double(as.scalar(M[1, node_start_idx + 1]))
+    node_id = (node_start_idx + 1) / 2
+    node_depth = floor(log(node_id, 2))
+    parent_node_id = floor(node_id / 2)
+    parent_node_entry_1 = as.integer(as.scalar(M[1, (parent_node_id * 2)-1]))
+
+    if (node_entry_1 > 0) {
+      # internal node
+      if (node_depth == tree_depth) {
+        print("Inconsistency: Node in last level is not an external node!")
+        consistent = FALSE
+      }
+
+      check_split_feature_exists = node_entry_1 <= ncol(X)
+      if (!check_split_feature_exists) print("Inconsistency: Split-Feature index "+node_entry_1+" exceeds number of features!")
+
+      consistent = consistent & check_split_feature_exists
+
+      feature = X[,node_entry_1]
+      check_value_in_range = min(feature) <= node_entry_2 & max(feature) >= node_entry_2
+      if (!check_value_in_range) print("Inconsistency: Split-Value " + node_entry_2 + " is not in range of the feature "+node_entry_1+"!")
+      consistent = consistent & check_value_in_range
+
+      check_parent_node = parent_node_entry_1 > 0
+      if (!check_parent_node) print("The parent of an internal node has to be an internal node!")
+      consistent = consistent & check_parent_node
+    }
+    else if (node_entry_1 == 0) {
+      # external node
+      sum_external_node_sizes = as.integer(sum_external_node_sizes + node_entry_2)
+
+      check_parent_node = parent_node_entry_1 > 0
+      if (!check_parent_node) print("The parent of an external node has to be an internal node!")
+      consistent = consistent & check_parent_node
+    }
+    else if (node_entry_1 == -1) {
+      # placeholder node (empty node entry)
+      check_empty_node = node_entry_2 == -1
+      if (!check_empty_node) print("A non-node can only have -1 as entries!")
+      consistent = consistent & check_empty_node
+
+      check_parent_node = parent_node_entry_1 <= 0
+      if (!check_parent_node) print("The parent of a non-node can only be another non-node or an external node!")
+      consistent = consistent & check_parent_node
+    }
+    else {
+      print("Inconsistency: First node-entry invalid!")
+      consistent = FALSE
+    }
+
+  }
+
+  # The summed sizes of leaf nodes need to be the original number of rows.
+  # This does not hold for subsampled models!
+  if (!is_subsampled_model) {
+    check_sum_externals = sum_external_node_sizes == nrow(X)
+    if (!check_sum_externals) print("Sizes in external nodes do not sum to the number of rows in X!")
+    consistent = consistent & check_sum_externals
+  }
+}
+
+is_iforest_consistent = function(Matrix[Double] M, Matrix[Double] X, Int subsampling_size)
+  return(Boolean consistent)
+{
+  consistent = TRUE
+
+  height_limit = ceil(log(subsampling_size, 2))
+  tree_size = 2*(2^(height_limit+1)-1)
+  for (tree_id in 1:nrow(M)) {
+    M_tree = M[tree_id,]
+    check_tree_size = ncol(M_tree) == tree_size
+    if (!check_tree_size) print("iTree in iForest does not have the expected size!")
+    consistent = consistent & check_tree_size
+
+    check_tree_consistent = is_itree_consistent(M_tree, X, height_limit, TRUE)
+    if (!check_tree_consistent) print("iTree at index "+tree_id+" in iForest is inconsistent!")
+    consistent = consistent & check_tree_consistent
+  }
+}
+
+# We need to initialize test_counter using a multiple return statement, otherwise we have scoping problems!
+# TODO: This is most likely a bug, in which case an issue should be created for it
+init_tests = function() return(Int cnt, List[String] fails) {cnt=0; fails=list();}
+[test_cnt, fails] = init_tests()
+# Test data
+X_3x5_allequal = matrix(1.0, rows=3, cols=5)
+X_8x3_equalrows = matrix("1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3", cols=3, rows=8)
+X_4x3_equalcols = matrix("1 1 1 2 2 2 3 3 3 4 4 4", cols=3, rows=4)
+X_4x4 = matrix("1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", cols=4, rows=4)
+X_6x6_ordered = matrix(seq(1,36), rows=6, cols=6)
+X_1x1 =  matrix(42.0, rows=1, cols=1)
+X_singlerow = matrix("1 2 3 4", cols=4, rows=1)
+X_empty = matrix(0.0, rows=0, cols=0)
+
+# empty unpruned linearized iTree model with 4 levels 
+# => max_depth = 3; 30 entries (IDs 1-15);  
+M_itree_4lvl_empty = matrix(0.0, rows=1, cols=2*(2^4-1))
+
+print("Starting Unit Tests")
+print("===============================================================")
+
+# =============================================================================================
+# Testing sub-routines
+# =============================================================================================
+print("\nTesting Subroutines")
+print("---------------------------------------------------------------")
+
+
+# s_isExternalINode
+# ---------------------------------------------------------------------------------------------
+print("\ns_isExternalINode")
+
+testname = "isExternalINode: Empty X"
+isexternal = iForest::s_isExternalINode(X_empty, 1, 3)
+[test_cnt, fails] = record_test_result(testname, isexternal, test_cnt, fails)
+
+testname = "isExternalINode: Single Row"
+isexternal = iForest::s_isExternalINode(X_singlerow, 1, 3)
+[test_cnt, fails] = record_test_result(testname, isexternal, test_cnt, fails)
+
+testname = "isExternalINode: First ID with depth(node_id) > max_depth"
+isexternal1 = iForest::s_isExternalINode(X_3x5_allequal, 2^2, 1)
+isexternal2 = iForest::s_isExternalINode(X_3x5_allequal, 2^4, 3)
+isexternal3 = iForest::s_isExternalINode(X_3x5_allequal, 2^6, 5)
+all_external = isexternal1 & isexternal2 & isexternal3
+[test_cnt, fails] = record_test_result(testname, all_external, test_cnt, fails)
+
+testname = "isExternalINode: IDs with depth(node_id) = max_depth"
+isexternal1 = iForest::s_isExternalINode(X_3x5_allequal, 2^2 - 1, 1)
+isexternal2 = iForest::s_isExternalINode(X_3x5_allequal, 2^2 - 2, 1)
+isexternal3 = iForest::s_isExternalINode(X_3x5_allequal, 2^4 - 1, 3)
+isexternal4 = iForest::s_isExternalINode(X_3x5_allequal, 2^4 - 8, 3)
+isexternal5 = iForest::s_isExternalINode(X_3x5_allequal, 2^6 - 1, 5)
+isexternal6 = iForest::s_isExternalINode(X_3x5_allequal, 2^6 - 12, 5)
+all_external = isexternal1 & isexternal2 & isexternal3 & isexternal4 & isexternal5 & isexternal6
+[test_cnt, fails] = record_test_result(testname, all_external, test_cnt, fails)
+
+testname = "isExternalINode: IDs with depth(node_id) < max_depth"
+isexternal1 = iForest::s_isExternalINode(X_3x5_allequal, 1, 1)
+isexternal2 = iForest::s_isExternalINode(X_3x5_allequal, 2^2, 3)
+isexternal3 = iForest::s_isExternalINode(X_3x5_allequal, 2^3 - 1, 3)
+isexternal4 = iForest::s_isExternalINode(X_3x5_allequal, 2^4, 5)
+isexternal5 = iForest::s_isExternalINode(X_3x5_allequal, 2^5 - 1, 5)
+all_external = isexternal1 & isexternal2 & isexternal3 & isexternal4 & isexternal5
+[test_cnt, fails] = record_test_result(testname, all_external == FALSE, test_cnt, fails)
+
+
+
+# s_addExternalINode
+# ---------------------------------------------------------------------------------------------
+print("\ns_addExternalINode")
+
+testname = "addExternalINode: Empty X_node"
+M_res = iForest::s_addExternalINode(X_empty, 8, M_itree_4lvl_empty)
+M_res = iForest::s_addExternalINode(X_empty, 12, M_res)
+M_res = iForest::s_addExternalINode(X_empty, 15, M_res)
+[test_cnt, fails] = record_test_result(testname, as.scalar(rowSums(M_res)) == 0, test_cnt, fails)
+
+testname = "addExternalINode: Different sizes for X_node"
+M_expected = M_itree_4lvl_empty
+M_res = iForest::s_addExternalINode(X_1x1, 8, M_itree_4lvl_empty)
+M_expected[1,16] = 1
+M_res = iForest::s_addExternalINode(X_1x1, 10, M_res)
+M_expected[1,20] = 1
+M_res = iForest::s_addExternalINode(X_4x4, 12, M_res)
+M_expected[1,24] = 4
+M_res = iForest::s_addExternalINode(X_3x5_allequal, 14, M_res)
+M_expected[1,28] = 3
+M_res = iForest::s_addExternalINode(X_singlerow, 15, M_res)
+M_expected[1,30] = 1
+[test_cnt, fails] = record_test_result(testname, matrices_equal(M_res, M_expected), test_cnt, fails)
+
+# s_addInternalINode
+# ---------------------------------------------------------------------------------------------
+print("\ns_addInternalINode")
+
+testname = "addInternalINode"
+M_expected = M_itree_4lvl_empty
+M_res = iForest::s_addInternalINode(1, 2, 3.1, M_itree_4lvl_empty)
+M_expected[1, 1] = 2
+M_expected[1, 2] = 3.1
+M_res = iForest::s_addInternalINode(4, 3, -5, M_res)
+M_expected[1, 7] = 3
+M_expected[1, 8] = -5
+M_res = iForest::s_addInternalINode(5, 7, -1.2, M_res)
+M_expected[1, 9] = 7
+M_expected[1, 10] = -1.2
+M_res = iForest::s_addInternalINode(7, 1, 0, M_res)
+M_expected[1, 13] = 1
+M_expected[1, 14] = 0
+[test_cnt, fails] = record_test_result(testname, matrices_equal(M_res, M_expected), test_cnt, fails)
+
+
+# s_splitINode
+# ---------------------------------------------------------------------------------------------
+print("\ns_splitINode")
+
+testname = "splitINode: Equal rows"
+[l_id, x_l, r_id, x_r] = iForest::s_splitINode(X_8x3_equalrows, 1, 1, 1)
+test_res1 = l_id == 2 & r_id == 3 & matrices_equal(x_l, X_8x3_equalrows) & nrow(x_r) == 0
+[l_id, x_l, r_id, x_r] = iForest::s_splitINode(X_8x3_equalrows, 2, 2, 2)
+test_res2 = l_id == 4 & r_id == 5 & matrices_equal(x_l, X_8x3_equalrows) & nrow(x_r) == 0
+[l_id, x_l, r_id, x_r] = iForest::s_splitINode(X_8x3_equalrows, 4, 3, 3)
+test_res3 = l_id == 8 & r_id == 9 & matrices_equal(x_l, X_8x3_equalrows) & nrow(x_r) == 0
+[test_cnt, fails] = record_test_result(testname, test_res1 & test_res2 & test_res3, test_cnt, fails)
+
+
+testname = "splitINode: Split in halfs"
+Xl_expected = X_4x3_equalcols[1:2]
+Xr_expected = X_4x3_equalcols[3:4]
+
+[l_id, x_l, r_id, x_r] = iForest::s_splitINode(X_4x3_equalcols, 1, 1, 2)
+test_res1 = l_id == 2 & r_id == 3 & matrices_equal(x_l, Xl_expected) & matrices_equal(x_r, Xr_expected)
+[l_id, x_l, r_id, x_r] = iForest::s_splitINode(X_4x3_equalcols, 2, 2, 2)
+test_res2 = l_id == 4 & r_id == 5 & matrices_equal(x_l, Xl_expected) & matrices_equal(x_r, Xr_expected)
+[l_id, x_l, r_id, x_r] = iForest::s_splitINode(X_4x3_equalcols, 4, 3, 2)
+test_res3 = l_id == 8 & r_id == 9 & matrices_equal(x_l, Xl_expected) & matrices_equal(x_r, Xr_expected)
+[test_cnt, fails] = record_test_result(testname, test_res1 & test_res2 & test_res3, test_cnt, fails)
+
+# s_sampleRows
+# ---------------------------------------------------------------------------------------------
+print("\ns_sampleRows")
+testname = "sampleRows: Equal Rows"
+X_res = iForest::s_sampleRows(X_8x3_equalrows, 2, -1)
+X_expected = X_8x3_equalrows[1:2,]
+[test_cnt, fails] = record_test_result(testname, matrices_equal(X_res, X_expected), test_cnt, fails)
+
+testname = "sampleRows: Random Seed"
+X_res1 = iForest::s_sampleRows(X_6x6_ordered, 3, 42)
+X_res2 = iForest::s_sampleRows(X_6x6_ordered, 3, 42)
+check_same_seed_equal_res = matrices_equal(X_res1, X_res2)
+
+X_res1 = iForest::s_sampleRows(X_6x6_ordered, 3, 21)
+X_res2 = iForest::s_sampleRows(X_6x6_ordered, 3, 42)
+check_diff_seed_diff_res = !matrices_equal(X_res1, X_res2)
+
+all_equal = TRUE
+for (i in 1:10) {
+  X_res1 = iForest::s_sampleRows(X_6x6_ordered, 2, -1)
+  X_res2 = iForest::s_sampleRows(X_6x6_ordered, 2, -1)
+
+  all_equal = all_equal & matrices_equal(X_res1, X_res2)
+}
+check_random_seed_random_res = !all_equal
+test_res = check_same_seed_equal_res & check_diff_seed_diff_res & check_random_seed_random_res
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+testname = "sampleRows: Sample all"
+X_res = iForest::s_sampleRows(X_6x6_ordered, nrow(X_6x6_ordered), -1)
+check_shuffeled = !matrices_equal(X_6x6_ordered, X_res)
+X_res = order(target=X_res, by=1)
+check_reordered = matrices_equal(X_6x6_ordered, X_res)
+test_res = check_shuffeled & check_reordered
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+
+# s_traverseITree
+# ---------------------------------------------------------------------------------------------
+print("\ns_traverseITree")
+
+testname = "traverseITree: Equal training rows"
+M_tree1 = iForest::m_iTree(X=X_8x3_equalrows, max_depth=3)
+M_tree2 = iForest::m_iTree(X=X_8x3_equalrows, max_depth=5)
+
+x_1 = X_8x3_equalrows[1, ]
+[pathlength1, externalNodeSize1] = iForest::s_traverseITree(M_tree1, x_1)
+[pathlength2, externalNodeSize2] = iForest::s_traverseITree(M_tree2, x_1)
+check_equal_x_1 = pathlength1 == 3 & externalNodeSize1 == nrow(X_8x3_equalrows)
+check_equal_x_2 = pathlength2 == 5 & externalNodeSize2 == nrow(X_8x3_equalrows)
+check_equal = check_equal_x_1 & check_equal_x_2
+
+x_1 = X_8x3_equalrows[1, ] - 0.1
+[pathlength1, externalNodeSize1] = iForest::s_traverseITree(M_tree1, x_1)
+[pathlength2, externalNodeSize2] = iForest::s_traverseITree(M_tree2, x_1)
+check_smaller_x_1 = pathlength1 == 3 & externalNodeSize1 == nrow(X_8x3_equalrows)
+check_smaller_x_2 = pathlength2 == 5 & externalNodeSize2 == nrow(X_8x3_equalrows)
+check_smaller = check_smaller_x_1 & check_smaller_x_2
+
+x_1 = X_8x3_equalrows[1, ] + 0.1
+[pathlength1, externalNodeSize1] = iForest::s_traverseITree(M_tree1, x_1)
+[pathlength2, externalNodeSize2] = iForest::s_traverseITree(M_tree2, x_1)
+check_larger_x_1 = pathlength1 == 1 & externalNodeSize1 == 0
+check_larger_x_2 = pathlength2 == 1 & externalNodeSize2 == 0
+check_larger = check_larger_x_1 & check_larger_x_2
+
+[test_cnt, fails] = record_test_result(testname, check_equal & check_smaller & check_larger, test_cnt, fails)
+
+# s_cn
+# ---------------------------------------------------------------------------------------------
+print("\ns_cn")
+testname = "s_cn"
+error_tolerance = 1e-2
+test_res = abs(iForest::s_cn(2) - 1) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(3) - 5/3) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(4) - 13/6) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(5) - 77/30) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(10) - 4861/1260) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(50) - 6.99841) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(100) - 8.3747550) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(500) - 11.5856468) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(1000) - 12.970941) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(5000) - 16.1890177) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(10000) - 17.5752120) < error_tolerance
+test_res = test_res & abs(iForest::s_cn(50000) - 20.7940078) < error_tolerance
+
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+# =============================================================================================
+# Testing main functions
+# =============================================================================================
+print("\nTesting Main Functions")
+print("---------------------------------------------------------------")
+
+# m_iTree
+# ---------------------------------------------------------------------------------------------
+print("\nm_iTree")
+testname = "iTree: Equal rows"
+M_res = iForest::m_iTree(X=X_8x3_equalrows, max_depth=3)
+# Since all rows are equal, this tree will grow exclusively to the left. With a max_depth=3 the linearized
+# model will hence only have entries for IDs 1, 2, 4 and 8.
+# Since in X_8x3_equalrows the feature index is always equal to the only value for the feature,
+# internal nodes (1,2,4) will have the same entry for split feature and split value and the external node 8
+# will have a 0 for the first entry (indicating a leaf node) and the number of rows as the second entry
+check_id1 = as.scalar(M_res[1, 1] == M_res[1, 2])
+check_id2 = as.scalar(M_res[1, 3] == M_res[1, 4])
+check_id4 = as.scalar(M_res[1, 7] == M_res[1, 8])
+check_id8 = as.scalar(M_res[1, 15] == 0 & M_res[1, 16] == nrow(X_8x3_equalrows))
+check_consistent = is_itree_consistent(M=M_res, X=X_8x3_equalrows, max_depth=3)
+test_res = check_id1 & check_id2 & check_id4 & check_id8 & check_consistent
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+testname = "iTree: Consistency"
+# create 100 random iTrees and check their consistency
+check_consistent = TRUE
+for (i in 1:100) {
+  rand_max_depth = as.integer(as.scalar(rand(rows=1, cols=1, min=1, max=10)))
+  rand_ncols = as.integer(as.scalar(rand(rows=1, cols=1, min=1, max=100)))
+  rand_nrows = as.integer(as.scalar(rand(rows=1, cols=1, min=2, max=100)))
+  rand_X = rand(rows=rand_nrows, cols=rand_ncols, min=-100, max=100)
+
+  M = iForest::m_iTree(X=rand_X, max_depth=rand_max_depth)
+
+  tree_consistent = is_itree_consistent(M=M, X=rand_X, max_depth=rand_max_depth)
+  if (!tree_consistent) {
+    print("Consistency check failed!")
+    print("X: "+toString(rand_X))
+    print("M: "+toString(M))
+  }
+  check_consistent = check_consistent & tree_consistent  
+}
+[test_cnt, fails] = record_test_result(testname, check_consistent, test_cnt, fails)
+
+
+testname = "iTree: Random seed"
+M_res1 = iForest::m_iTree(X=X_4x4, max_depth=5, seed=42)
+M_res2 = iForest::m_iTree(X=X_4x4, max_depth=5, seed=42)
+check_same_seed_same_model = matrices_equal(M_res1, M_res2)
+
+M_res1 = iForest::m_iTree(X=X_4x4, max_depth=5, seed=21)
+M_res2 = iForest::m_iTree(X=X_4x4, max_depth=5, seed=42)
+check_different_seed_different_model = !matrices_equal(M_res1, M_res2)
+
+all_equal = TRUE
+for (i in 1:10) {
+  M_res1 = iForest::m_iTree(X=X_4x4, max_depth=5)
+  M_res2 = iForest::m_iTree(X=X_4x4, max_depth=5)
+
+  all_equal = all_equal & matrices_equal(M_res1, M_res2)
+}
+check_random_seed_random_model = !all_equal
+test_res = check_same_seed_same_model & check_different_seed_different_model & check_random_seed_random_model
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+# m_iForest
+# ---------------------------------------------------------------------------------------------
+print("\nm_iForest")
+testname = "iForest: Equal rows"
+# Since all rows are equal, all trees will grow exclusively to the left. 
+# For each tree, here we do the same checks as with test "iTree: Equal rows".
+# Additionally we check the resulting forest for consistency
+subsampling_size = 5
+height_limit = ceil(log(subsampling_size, 2))
+n_trees = 2
+
+M_res = iForest::m_iForest(X=X_8x3_equalrows, n_trees=n_trees, subsampling_size=subsampling_size)
+check_consistent = is_iforest_consistent(M=M_res, X=X_8x3_equalrows, subsampling_size=subsampling_size)
+
+check_trees = TRUE
+for (i in 1:n_trees) {
+  M_tree = M_res[i,]
+  check_id1 = as.scalar(M_tree[1, 1] == M_tree[1, 2])
+  check_id2 = as.scalar(M_tree[1, 3] == M_tree[1, 4])
+  check_id4 = as.scalar(M_tree[1, 7] == M_tree[1, 8])
+  check_id8 = as.scalar(M_tree[1, 15] == 0 & M_tree[1, 16] == subsampling_size)
+  check_trees = check_trees & check_id1 & check_id2 & check_id4 & check_id8
+}
+[test_cnt, fails] = record_test_result(testname, check_consistent & 
check_trees, test_cnt, fails)
+
+testname = "iForest: Consistency"
+# create 20 random iForests and check their consistency
+check_consistent = TRUE
+for (i in 1:20) {
+  rand_n_trees = as.integer(as.scalar(rand(rows=1, cols=1, min=1, max=30)))
+  rand_ncols = as.integer(as.scalar(rand(rows=1, cols=1, min=1, max=100)))
+  rand_nrows = as.integer(as.scalar(rand(rows=1, cols=1, min=2, max=100)))
+  rand_X = rand(rows=rand_nrows, cols=rand_ncols, min=-100, max=100)
+  rand_subsampling_size = as.integer(as.scalar(rand(rows=1, cols=1, min=2, max=nrow(rand_X))))
+
+  M = iForest::m_iForest(X=rand_X, n_trees=rand_n_trees, subsampling_size=rand_subsampling_size)
+  check_consistent = check_consistent & is_iforest_consistent(M=M, X=rand_X, subsampling_size=rand_subsampling_size)
+}
+[test_cnt, fails] = record_test_result(testname, check_consistent, test_cnt, fails)
+
+
+testname = "iForest: Random seed"
+M_res1 = iForest::m_iForest(X=X_4x4, n_trees=2, subsampling_size=3, seed=42)
+M_res2 = iForest::m_iForest(X=X_4x4, n_trees=2, subsampling_size=3, seed=42)
+check_same_seed_same_model = matrices_equal(M_res1, M_res2)
+
+M_res1 = iForest::m_iForest(X=X_4x4, n_trees=5, subsampling_size=4, seed=24)
+M_res2 = iForest::m_iForest(X=X_4x4, n_trees=5, subsampling_size=4, seed=42)
+check_different_seed_different_model = !matrices_equal(M_res1, M_res2)
+
+all_equal = TRUE
+for (i in 1:10) {
+  M_res1 = iForest::m_iForest(X=X_4x4, subsampling_size=3, n_trees=5)
+  M_res2 = iForest::m_iForest(X=X_4x4, subsampling_size=3, n_trees=5)
+
+  all_equal = all_equal & matrices_equal(M_res1, M_res2)
+}
+check_random_seed_random_model = !all_equal
+test_res = check_same_seed_same_model & check_different_seed_different_model & check_random_seed_random_model
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+
+# m_PathLength
+# ---------------------------------------------------------------------------------------------
+print("\nm_PathLength")
+testname = "PathLength"
+M_tree1 = iForest::m_iTree(X=X_8x3_equalrows, max_depth=3)
+M_tree2 = iForest::m_iTree(X=X_8x3_equalrows, max_depth=5)
+M_tree3 = iForest::m_iTree(X=X_8x3_equalrows, max_depth=10)
+# c(8) calculated by hand
+cn_8 = 3.43571428
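+# The value corresponds to c(n) = 2*H(n-1) - 2*(n-1)/n evaluated with the exact harmonic
+# number H(7) = 1 + 1/2 + ... + 1/7 = 2.5928571:
+#   c(8) = 2*2.5928571 - 2*7/8 = 5.1857143 - 1.75 = 3.4357143
+# ([Liu2008] approximates H(i) by ln(i) + 0.5772156649, which would give ~3.2963 instead.)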
+
+# x_equal always ends in the leftmost leaf node (size 8): path length = max_depth + c(8)
+x_equal = X_8x3_equalrows[1, ]
+# x_larger always ends in an empty right leaf node (size 0): path length = 1
+x_larger = X_8x3_equalrows[1, ] + 0.1
+
+test_res = iForest::m_PathLength(M_tree1, x_equal) == 3 + cn_8
+test_res = test_res & (iForest::m_PathLength(M_tree2, x_equal) == 5 + cn_8)
+test_res = test_res & (iForest::m_PathLength(M_tree3, x_equal) == 10 + cn_8)
+test_res = test_res & (iForest::m_PathLength(M_tree1, x_larger) == 1)
+test_res = test_res & (iForest::m_PathLength(M_tree2, x_larger) == 1)
+test_res = test_res & (iForest::m_PathLength(M_tree3, x_larger) == 1)
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+# m_score
+# ---------------------------------------------------------------------------------------------
+print("\nm_score")
+testname = "score"
+# As in test "iForest: Equal rows", the iTrees built here grow exclusively to the left.
+# => The leftmost external node will have size `subsampling_size` and all other external
+#    nodes will have size 0, so the path-length adjustment c(n) is c(subsampling_size) for
+#    the leftmost leaf and 0 for the empty leaves.
+# => The score for a sample landing in the leftmost node is therefore
+#    2^-(max_PathLength/c(subsampling_size)) where max_PathLength = max_depth + c(subsampling_size)
+#    and max_depth = ceil(log(subsampling_size, 2)).
+#    For the rightmost node the path length is 1, so the score is 2^-(1/c(subsampling_size)).
+# (Note: c(n) of a leaf node is not the normalization constant of the score; the
+#  normalization constant is always c(subsampling_size).)
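+# Worked examples (hand-derived, using exact harmonic numbers H(1)=1 and H(2)=1.5,
+# consistent with cn_8 above):
+#   subsampling_size=2: c(2)=1,   max_depth=1
+#     left:  2^-((1 + 1)/1)           = 2^-2   = 0.25      right: 2^-(1/1)      = 0.5
+#   subsampling_size=3: c(3)=5/3, max_depth=2
+#     left:  2^-((2 + 5/3)/(5/3))     = 2^-2.2 ~= 0.217637  right: 2^-(1/(5/3)) ~= 0.65975
+# These are the expected values checked below.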
+
+# Sample that will always end in the leftmost leaf node
+x_equal = X_8x3_equalrows[1,]
+# Sample that will always end in the rightmost leaf node
+x_larger = X_8x3_equalrows[1,] + 0.1
+error_tolerance = 1e-5
+
+# Subsampling_size 2
+M_forest = iForest::m_iForest(X=X_8x3_equalrows, n_trees=10, subsampling_size=2)
+res_score_l = iForest::m_score(M_forest, x_equal, 2)
+res_score_r = iForest::m_score(M_forest, x_larger, 2)
+check_subsample2 = abs(res_score_l - 0.25) < error_tolerance & abs(res_score_r - 0.5) < error_tolerance
+
+
+# Subsampling_size 3
+M_forest = iForest::m_iForest(X=X_8x3_equalrows, n_trees=10, subsampling_size=3)
+res_score_l = iForest::m_score(M_forest, x_equal, 3)
+res_score_r = iForest::m_score(M_forest, x_larger, 3)
+check_subsample3 = abs(res_score_l - 0.217637) < error_tolerance & abs(res_score_r - 0.65975) < error_tolerance
+
+# Subsampling_size 4
+M_forest = iForest::m_iForest(X=X_8x3_equalrows, n_trees=10, subsampling_size=4)
+res_score_l = iForest::m_score(M_forest, x_equal, 4)
+res_score_r = iForest::m_score(M_forest, x_larger, 4)
+check_subsample4 = abs(res_score_l - 0.263691) < error_tolerance & abs(res_score_r - 0.726211) < error_tolerance
+
+# Subsampling_size 8
+M_forest = iForest::m_iForest(X=X_8x3_equalrows, n_trees=10, subsampling_size=8)
+res_score_l = iForest::m_score(M_forest, x_equal, 8)
+res_score_r = iForest::m_score(M_forest, x_larger, 8)
+check_subsample8 = abs(res_score_l - 0.27297) < error_tolerance & abs(res_score_r - 0.81730) < error_tolerance
+
+
+test_res = check_subsample2 & check_subsample3 & check_subsample4 & check_subsample8
+[test_cnt, fails] = record_test_result(testname, test_res, test_cnt, fails)
+
+
+# =============================================================================================
+# Summary
+# =============================================================================================
+print("\n===============================================================")
+succ_test_cnt = test_cnt - length(fails)
+print(toString(succ_test_cnt) + "/" + toString(test_cnt) + " tests succeeded!")
+if (length(fails) > 0) {
+  print("Tests that failed:")
+  print(toString(fails))
+}
+
+print("\n\n")
+
+
+# F U N C T I O N A L   T E S T S
+# ---------------------------------------------------------------------------------------------
+# ---------------------------------------------------------------------------------------------
+
+print("Starting Functional Tests ")
+print("===============================================================")
+
+print("No outliers")
+print("--------------------------")
+nr_runs = 20
+nr_samples = 1000
+nr_features = 10
+print(toString(nr_runs) + " runs with "+toString(nr_samples)+" uniformly distributed samples consisting of " + toString(nr_features)+ " features.")
+print("No outliers.")
+print("When there are no anomalies in X, we expect all samples to have an anomaly score of ~0.5")
+print("Hence, this test reports scores < 0.4 or scores > 0.6.")
+print("Hence, this test reports scores < 0.4 or scores > 0.6.")
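+# Per [Liu2008], a score of ~0.5 results when the expected path length E(h(x)) is close to
+# c(n); if all samples score ~0.5, the data contains no distinct anomalies.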
+print("\n")
+
+nr_unexpected_scores = matrix(0, rows=1, cols=nr_runs)
+parfor (i in 1:nr_runs) {
+  X = rand(rows=nr_samples, cols=nr_features, min=-100, max=100)
+  model = iForest::outlierByIsolationForest(X=X, n_trees=20, subsampling_size=100)
+  scores = iForest::outlierByIsolationForestApply(iForestModel=model, X=X)
+  
+  unexpected_scores_indicator = scores < 0.4 | scores > 0.6
+  if (sum(unexpected_scores_indicator) > 0) {
+    print("- Run "+i+": Unexpected scores found: ")
+    unexpected = removeEmpty(target=scores, margin="rows", select=unexpected_scores_indicator)
+    print(toString(unexpected))
+  }
+
+  nr_unexpected_scores[1, i] = sum(unexpected_scores_indicator)
+}
+
+print("Result: "+toString(as.integer(sum(nr_unexpected_scores)))+"/"+toString(as.integer(nr_runs*nr_samples))+" scores found to be unexpected!")
+print("\n\n")
+
+print("Training with 1% outliers")
+print("--------------------------")
+nr_runs = 10
+nr_samples = 10000
+nr_features = 5
+max_outlier_features = 5
+print(toString(nr_runs) + " runs with "+toString(nr_samples)+" normally distributed samples (mean=0, std=1) consisting of "+nr_features+" features.")
+print("Training set contains 1% outliers.")
+print("Outliers are created by randomly picking up to " + toString(max_outlier_features) + " features and adding/subtracting a value between 10 and 100.\n")
+print("To test the algorithm, the test set contains 12 random samples in groups of 3:")
+print("- Samples 1-3: No outliers.")
+print("- Samples 4-6: Outliers with randomly added deviation between 5 and 10.")
+print("- Samples 7-9: Outliers with randomly added deviation between 50 and 100.")
+print("- Samples 10-12: Outliers with randomly added deviation between 500 and 1000.")
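+# Note on the decision rule used below: samples are flagged as outliers when their score
+# exceeds 0.5. [Liu2008] interprets scores close to 1 as anomalies, scores much smaller than
+# 0.5 as normal points, and scores around 0.5 as no distinct anomaly, so this plain 0.5
+# cut-off is only a rough rule for this functional test.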
+
+
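+# Generates `rows` standard-normal samples and turns each into an outlier by perturbing up to
+# `max_outlier_features` randomly chosen features with a deviation drawn from [min_dev, max_dev]
+# (subtracted for roughly the first half of the rows, added for the rest).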
+create_rand_outliers = function(Int rows, Int cols, Int max_outlier_features, Double min_dev, Double max_dev)
+  return(Matrix[Double] outliers) {
+  outliers = rand(rows=rows, cols=cols, pdf="normal")
+  for (r_idx in 1:rows) {
+    n_outlier_feats = as.scalar(sample(max_outlier_features, 1))
+    outlier_feats = sample(cols, n_outlier_feats)
+    for (i_feat in 1:n_outlier_feats) {
+      f_idx = as.scalar(outlier_feats[i_feat])
+      dev = as.scalar(rand(rows=1, cols=1, min=min_dev, max=max_dev))
+      if (r_idx < as.integer(rows/2))
+        outliers[r_idx, f_idx] = outliers[r_idx, f_idx] - dev
+      else
+        outliers[r_idx, f_idx] = outliers[r_idx, f_idx] + dev
+    }
+  }
+}
+
+parfor (i_run in 1:nr_runs) {
+  X_train = rand(rows=nr_samples, cols=nr_features, pdf="normal")
+  n_train_outliers = as.integer(nr_samples/100)
+  X_train[1:n_train_outliers,] = create_rand_outliers(n_train_outliers, nr_features, max_outlier_features, 10, 100)
+
+
+  iF_model = iForest::outlierByIsolationForest(X=X_train, n_trees=100, subsampling_size=250)
+
+  X_test = rbind(
+    rand(rows=3, cols=nr_features, pdf="normal"),
+    create_rand_outliers(3, nr_features, max_outlier_features, 5, 10),
+    create_rand_outliers(3, nr_features, max_outlier_features, 50, 100),
+    create_rand_outliers(3, nr_features, max_outlier_features, 500, 1000)
+  )
+
+  test_scores = iForest::outlierByIsolationForestApply(iForestModel=iF_model, X=X_test)
+  
+  print("Run "+i_run+":")
+  print("- Scores:")
+  print(toString(t(test_scores)))
+  print("- Is Outlier?: ")
+  print(toString(t(test_scores) > 0.5))
+  print("\n")
+}
+
+
+print("===============================================================")
+print("TESTING FINISHED!")
\ No newline at end of file
