Github user KyleLi1985 commented on a diff in the pull request: https://github.com/apache/spark/pull/23126#discussion_r237551217 --- Diff: mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala --- @@ -128,6 +128,82 @@ class RowMatrix @Since("1.0.0") ( RowMatrix.triuToFull(n, GU.data) } + private def computeDenseVectorCovariance(mean: Vector, n: Int, m: Long): Matrix = { + + val bc = rows.context.broadcast(mean) + + // Computes n*(n+1)/2, avoiding overflow in the multiplication. + // This succeeds when n <= 65535, which is checked above + val nt = if (n % 2 == 0) ((n / 2) * (n + 1)) else (n * ((n + 1) / 2)) + + val MU = rows.treeAggregate(new BDV[Double](nt))( + seqOp = (U, v) => { + + val n = v.size + val na = Array.ofDim[Double](n) + val means = bc.value + if (v.isInstanceOf[DenseVector]) { + v.foreachActive{(index, value) => --- End diff -- I just did a quick test with the following input data: val data = Seq( Vectors.dense(100000.000004, 199999.999999), Vectors.dense(100000.000012, 0.0).toSparse, Vectors.dense(99999.9999931, 200000.000003), Vectors.dense(99999.9999977, 200000.000001) ) and val data = Seq( Vectors.dense(100000.000004, 199999.999999), Vectors.dense(100000.000012, 0.0), Vectors.dense(99999.9999931, 200000.000003), Vectors.dense(99999.9999977, 200000.000001) ) For the all-dense-vector case, the Breeze and Spark implementations (which subtract the mean from 0) produce the same result: 6.711333870761802E-11 -0.6866668021258175 -0.6866668021258175 1.00000000001E10 However, if one dense vector is changed to a sparse vector and the mean is not subtracted from 0, Spark produces this result instead: 6.711333870761802E-11 -0.17166651863796398 -0.17166651863796398 2.500000000024999E9
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org