Repository: spark
Updated Branches:
  refs/heads/master 20fd35dfd -> 35e8ab939


[SPARK-10615] [PYSPARK] change assertEquals to assertEqual

Since ```assertEquals``` is deprecated, we need to change ```assertEquals``` to
```assertEqual``` in the existing Python unit tests.
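
For context, a minimal sketch (not part of this commit; the class and test names
below are illustrative) of why the rename matters: in ```unittest``` on Python 3,
```assertEquals``` is only a deprecated alias of ```assertEqual``` and emits a
DeprecationWarning when called (the alias was removed entirely in Python 3.12):

```python
import unittest


class AliasExample(unittest.TestCase):
    def test_preferred_name(self):
        # assertEqual is the supported spelling.
        self.assertEqual(2 + 2, 4)

    def test_deprecated_alias(self):
        # assertEquals is a deprecated alias of assertEqual; calling it
        # emits a DeprecationWarning (Python 3.2+; removed in 3.12).
        with self.assertWarns(DeprecationWarning):
            self.assertEquals(2 + 2, 4)


if __name__ == "__main__":
    unittest.main()
```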

Author: Yanbo Liang <yblia...@gmail.com>

Closes #8814 from yanboliang/spark-10615.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/35e8ab93
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/35e8ab93
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/35e8ab93

Branch: refs/heads/master
Commit: 35e8ab939000d4a1a01c1af4015c25ff6f4013a3
Parents: 20fd35d
Author: Yanbo Liang <yblia...@gmail.com>
Authored: Fri Sep 18 09:53:52 2015 -0700
Committer: Xiangrui Meng <m...@databricks.com>
Committed: Fri Sep 18 09:53:52 2015 -0700

----------------------------------------------------------------------
 python/pyspark/ml/tests.py        |  16 ++--
 python/pyspark/mllib/tests.py     | 162 ++++++++++++++++-----------------
 python/pyspark/sql/tests.py       |  18 ++--
 python/pyspark/streaming/tests.py |   2 +-
 4 files changed, 99 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/35e8ab93/python/pyspark/ml/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py
index b892318..648fa88 100644
--- a/python/pyspark/ml/tests.py
+++ b/python/pyspark/ml/tests.py
@@ -182,7 +182,7 @@ class ParamTests(PySparkTestCase):
         self.assertEqual(testParams.getMaxIter(), 10)
         testParams.setMaxIter(100)
         self.assertTrue(testParams.isSet(maxIter))
-        self.assertEquals(testParams.getMaxIter(), 100)
+        self.assertEqual(testParams.getMaxIter(), 100)
 
         self.assertTrue(testParams.hasParam(inputCol))
         self.assertFalse(testParams.hasDefault(inputCol))
@@ -195,7 +195,7 @@ class ParamTests(PySparkTestCase):
         testParams._setDefault(seed=41)
         testParams.setSeed(43)
 
-        self.assertEquals(
+        self.assertEqual(
             testParams.explainParams(),
             "\n".join(["inputCol: input column name (undefined)",
                        "maxIter: max number of iterations (>= 0) (default: 10, 
current: 100)",
@@ -264,23 +264,23 @@ class FeatureTests(PySparkTestCase):
         self.assertEqual(ngram0.getInputCol(), "input")
         self.assertEqual(ngram0.getOutputCol(), "output")
         transformedDF = ngram0.transform(dataset)
-        self.assertEquals(transformedDF.head().output, ["a b c d", "b c d e"])
+        self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
 
     def test_stopwordsremover(self):
         sqlContext = SQLContext(self.sc)
         dataset = sqlContext.createDataFrame([Row(input=["a", "panda"])])
         stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
         # Default
-        self.assertEquals(stopWordRemover.getInputCol(), "input")
+        self.assertEqual(stopWordRemover.getInputCol(), "input")
         transformedDF = stopWordRemover.transform(dataset)
-        self.assertEquals(transformedDF.head().output, ["panda"])
+        self.assertEqual(transformedDF.head().output, ["panda"])
         # Custom
         stopwords = ["panda"]
         stopWordRemover.setStopWords(stopwords)
-        self.assertEquals(stopWordRemover.getInputCol(), "input")
-        self.assertEquals(stopWordRemover.getStopWords(), stopwords)
+        self.assertEqual(stopWordRemover.getInputCol(), "input")
+        self.assertEqual(stopWordRemover.getStopWords(), stopwords)
         transformedDF = stopWordRemover.transform(dataset)
-        self.assertEquals(transformedDF.head().output, ["a"])
+        self.assertEqual(transformedDF.head().output, ["a"])
 
 
 class HasInducedError(Params):

http://git-wip-us.apache.org/repos/asf/spark/blob/35e8ab93/python/pyspark/mllib/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py
index 636f9a0..96cf134 100644
--- a/python/pyspark/mllib/tests.py
+++ b/python/pyspark/mllib/tests.py
@@ -166,13 +166,13 @@ class VectorTests(MLlibTestCase):
                      [1., 2., 3., 4.],
                      [1., 2., 3., 4.]])
         arr = pyarray.array('d', [0, 1, 2, 3])
-        self.assertEquals(10.0, sv.dot(dv))
+        self.assertEqual(10.0, sv.dot(dv))
         self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
-        self.assertEquals(30.0, dv.dot(dv))
+        self.assertEqual(30.0, dv.dot(dv))
         self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
-        self.assertEquals(30.0, lst.dot(dv))
+        self.assertEqual(30.0, lst.dot(dv))
         self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
-        self.assertEquals(7.0, sv.dot(arr))
+        self.assertEqual(7.0, sv.dot(arr))
 
     def test_squared_distance(self):
         sv = SparseVector(4, {1: 1, 3: 2})
@@ -181,27 +181,27 @@ class VectorTests(MLlibTestCase):
         lst1 = [4, 3, 2, 1]
         arr = pyarray.array('d', [0, 2, 1, 3])
         narr = array([0, 2, 1, 3])
-        self.assertEquals(15.0, _squared_distance(sv, dv))
-        self.assertEquals(25.0, _squared_distance(sv, lst))
-        self.assertEquals(20.0, _squared_distance(dv, lst))
-        self.assertEquals(15.0, _squared_distance(dv, sv))
-        self.assertEquals(25.0, _squared_distance(lst, sv))
-        self.assertEquals(20.0, _squared_distance(lst, dv))
-        self.assertEquals(0.0, _squared_distance(sv, sv))
-        self.assertEquals(0.0, _squared_distance(dv, dv))
-        self.assertEquals(0.0, _squared_distance(lst, lst))
-        self.assertEquals(25.0, _squared_distance(sv, lst1))
-        self.assertEquals(3.0, _squared_distance(sv, arr))
-        self.assertEquals(3.0, _squared_distance(sv, narr))
+        self.assertEqual(15.0, _squared_distance(sv, dv))
+        self.assertEqual(25.0, _squared_distance(sv, lst))
+        self.assertEqual(20.0, _squared_distance(dv, lst))
+        self.assertEqual(15.0, _squared_distance(dv, sv))
+        self.assertEqual(25.0, _squared_distance(lst, sv))
+        self.assertEqual(20.0, _squared_distance(lst, dv))
+        self.assertEqual(0.0, _squared_distance(sv, sv))
+        self.assertEqual(0.0, _squared_distance(dv, dv))
+        self.assertEqual(0.0, _squared_distance(lst, lst))
+        self.assertEqual(25.0, _squared_distance(sv, lst1))
+        self.assertEqual(3.0, _squared_distance(sv, arr))
+        self.assertEqual(3.0, _squared_distance(sv, narr))
 
     def test_hash(self):
         v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
         v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
         v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
         v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
-        self.assertEquals(hash(v1), hash(v2))
-        self.assertEquals(hash(v1), hash(v3))
-        self.assertEquals(hash(v2), hash(v3))
+        self.assertEqual(hash(v1), hash(v2))
+        self.assertEqual(hash(v1), hash(v3))
+        self.assertEqual(hash(v2), hash(v3))
         self.assertFalse(hash(v1) == hash(v4))
         self.assertFalse(hash(v2) == hash(v4))
 
@@ -212,8 +212,8 @@ class VectorTests(MLlibTestCase):
         v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
         v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
         v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
-        self.assertEquals(v1, v2)
-        self.assertEquals(v1, v3)
+        self.assertEqual(v1, v2)
+        self.assertEqual(v1, v3)
         self.assertFalse(v2 == v4)
         self.assertFalse(v1 == v5)
         self.assertFalse(v1 == v6)
@@ -238,13 +238,13 @@ class VectorTests(MLlibTestCase):
 
     def test_sparse_vector_indexing(self):
         sv = SparseVector(4, {1: 1, 3: 2})
-        self.assertEquals(sv[0], 0.)
-        self.assertEquals(sv[3], 2.)
-        self.assertEquals(sv[1], 1.)
-        self.assertEquals(sv[2], 0.)
-        self.assertEquals(sv[-1], 2)
-        self.assertEquals(sv[-2], 0)
-        self.assertEquals(sv[-4], 0)
+        self.assertEqual(sv[0], 0.)
+        self.assertEqual(sv[3], 2.)
+        self.assertEqual(sv[1], 1.)
+        self.assertEqual(sv[2], 0.)
+        self.assertEqual(sv[-1], 2)
+        self.assertEqual(sv[-2], 0)
+        self.assertEqual(sv[-4], 0)
         for ind in [4, -5]:
             self.assertRaises(ValueError, sv.__getitem__, ind)
         for ind in [7.8, '1']:
@@ -255,7 +255,7 @@ class VectorTests(MLlibTestCase):
         expected = [[0, 6], [1, 8], [4, 10]]
         for i in range(3):
             for j in range(2):
-                self.assertEquals(mat[i, j], expected[i][j])
+                self.assertEqual(mat[i, j], expected[i][j])
 
     def test_repr_dense_matrix(self):
         mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
@@ -308,11 +308,11 @@ class VectorTests(MLlibTestCase):
         # Test sparse matrix creation.
         sm1 = SparseMatrix(
             3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
-        self.assertEquals(sm1.numRows, 3)
-        self.assertEquals(sm1.numCols, 4)
-        self.assertEquals(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
-        self.assertEquals(sm1.rowIndices.tolist(), [1, 2, 1, 2])
-        self.assertEquals(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
+        self.assertEqual(sm1.numRows, 3)
+        self.assertEqual(sm1.numCols, 4)
+        self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
+        self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
+        self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
         self.assertTrue(
             repr(sm1),
             'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)')
@@ -325,13 +325,13 @@ class VectorTests(MLlibTestCase):
 
         for i in range(3):
             for j in range(4):
-                self.assertEquals(expected[i][j], sm1[i, j])
+                self.assertEqual(expected[i][j], sm1[i, j])
         self.assertTrue(array_equal(sm1.toArray(), expected))
 
         # Test conversion to dense and sparse.
         smnew = sm1.toDense().toSparse()
-        self.assertEquals(sm1.numRows, smnew.numRows)
-        self.assertEquals(sm1.numCols, smnew.numCols)
+        self.assertEqual(sm1.numRows, smnew.numRows)
+        self.assertEqual(sm1.numCols, smnew.numCols)
         self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
         self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
         self.assertTrue(array_equal(sm1.values, smnew.values))
@@ -339,11 +339,11 @@ class VectorTests(MLlibTestCase):
         sm1t = SparseMatrix(
             3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
             isTransposed=True)
-        self.assertEquals(sm1t.numRows, 3)
-        self.assertEquals(sm1t.numCols, 4)
-        self.assertEquals(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
-        self.assertEquals(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
-        self.assertEquals(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
+        self.assertEqual(sm1t.numRows, 3)
+        self.assertEqual(sm1t.numCols, 4)
+        self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
+        self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
+        self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
 
         expected = [
             [3, 2, 0, 0],
@@ -352,18 +352,18 @@ class VectorTests(MLlibTestCase):
 
         for i in range(3):
             for j in range(4):
-                self.assertEquals(expected[i][j], sm1t[i, j])
+                self.assertEqual(expected[i][j], sm1t[i, j])
         self.assertTrue(array_equal(sm1t.toArray(), expected))
 
     def test_dense_matrix_is_transposed(self):
         mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
         mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
-        self.assertEquals(mat1, mat)
+        self.assertEqual(mat1, mat)
 
         expected = [[0, 4], [1, 6], [3, 9]]
         for i in range(3):
             for j in range(2):
-                self.assertEquals(mat1[i, j], expected[i][j])
+                self.assertEqual(mat1[i, j], expected[i][j])
         self.assertTrue(array_equal(mat1.toArray(), expected))
 
         sm = mat1.toSparse()
@@ -412,8 +412,8 @@ class ListTests(MLlibTestCase):
         ]
         clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||",
                                 initializationSteps=7, epsilon=1e-4)
-        self.assertEquals(clusters.predict(data[0]), clusters.predict(data[1]))
-        self.assertEquals(clusters.predict(data[2]), clusters.predict(data[3]))
+        self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1]))
+        self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3]))
 
     def test_kmeans_deterministic(self):
         from pyspark.mllib.clustering import KMeans
@@ -443,8 +443,8 @@ class ListTests(MLlibTestCase):
         clusters = GaussianMixture.train(data, 2, convergenceTol=0.001,
                                          maxIterations=10, seed=56)
         labels = clusters.predict(data).collect()
-        self.assertEquals(labels[0], labels[1])
-        self.assertEquals(labels[2], labels[3])
+        self.assertEqual(labels[0], labels[1])
+        self.assertEqual(labels[2], labels[3])
 
     def test_gmm_deterministic(self):
         from pyspark.mllib.clustering import GaussianMixture
@@ -456,7 +456,7 @@ class ListTests(MLlibTestCase):
         clusters2 = GaussianMixture.train(data, 5, convergenceTol=0.001,
                                           maxIterations=10, seed=63)
         for c1, c2 in zip(clusters1.weights, clusters2.weights):
-            self.assertEquals(round(c1, 7), round(c2, 7))
+            self.assertEqual(round(c1, 7), round(c2, 7))
 
     def test_classification(self):
         from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
@@ -711,18 +711,18 @@ class SciPyTests(MLlibTestCase):
         lil[1, 0] = 1
         lil[3, 0] = 2
         sv = SparseVector(4, {1: 1, 3: 2})
-        self.assertEquals(sv, _convert_to_vector(lil))
-        self.assertEquals(sv, _convert_to_vector(lil.tocsc()))
-        self.assertEquals(sv, _convert_to_vector(lil.tocoo()))
-        self.assertEquals(sv, _convert_to_vector(lil.tocsr()))
-        self.assertEquals(sv, _convert_to_vector(lil.todok()))
+        self.assertEqual(sv, _convert_to_vector(lil))
+        self.assertEqual(sv, _convert_to_vector(lil.tocsc()))
+        self.assertEqual(sv, _convert_to_vector(lil.tocoo()))
+        self.assertEqual(sv, _convert_to_vector(lil.tocsr()))
+        self.assertEqual(sv, _convert_to_vector(lil.todok()))
 
         def serialize(l):
             return ser.loads(ser.dumps(_convert_to_vector(l)))
-        self.assertEquals(sv, serialize(lil))
-        self.assertEquals(sv, serialize(lil.tocsc()))
-        self.assertEquals(sv, serialize(lil.tocsr()))
-        self.assertEquals(sv, serialize(lil.todok()))
+        self.assertEqual(sv, serialize(lil))
+        self.assertEqual(sv, serialize(lil.tocsc()))
+        self.assertEqual(sv, serialize(lil.tocsr()))
+        self.assertEqual(sv, serialize(lil.todok()))
 
     def test_dot(self):
         from scipy.sparse import lil_matrix
@@ -730,7 +730,7 @@ class SciPyTests(MLlibTestCase):
         lil[1, 0] = 1
         lil[3, 0] = 2
         dv = DenseVector(array([1., 2., 3., 4.]))
-        self.assertEquals(10.0, dv.dot(lil))
+        self.assertEqual(10.0, dv.dot(lil))
 
     def test_squared_distance(self):
         from scipy.sparse import lil_matrix
@@ -739,8 +739,8 @@ class SciPyTests(MLlibTestCase):
         lil[3, 0] = 2
         dv = DenseVector(array([1., 2., 3., 4.]))
         sv = SparseVector(4, {0: 1, 1: 2, 2: 3, 3: 4})
-        self.assertEquals(15.0, dv.squared_distance(lil))
-        self.assertEquals(15.0, sv.squared_distance(lil))
+        self.assertEqual(15.0, dv.squared_distance(lil))
+        self.assertEqual(15.0, sv.squared_distance(lil))
 
     def scipy_matrix(self, size, values):
         """Create a column SciPy matrix from a dictionary of values"""
@@ -759,8 +759,8 @@ class SciPyTests(MLlibTestCase):
             self.scipy_matrix(3, {2: 1.1})
         ]
         clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||")
-        self.assertEquals(clusters.predict(data[0]), clusters.predict(data[1]))
-        self.assertEquals(clusters.predict(data[2]), clusters.predict(data[3]))
+        self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1]))
+        self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3]))
 
     def test_classification(self):
         from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
@@ -984,12 +984,12 @@ class Word2VecTests(MLlibTestCase):
             .setNumIterations(10) \
             .setSeed(1024) \
             .setMinCount(3)
-        self.assertEquals(model.vectorSize, 2)
+        self.assertEqual(model.vectorSize, 2)
         self.assertTrue(model.learningRate < 0.02)
-        self.assertEquals(model.numPartitions, 2)
-        self.assertEquals(model.numIterations, 10)
-        self.assertEquals(model.seed, 1024)
-        self.assertEquals(model.minCount, 3)
+        self.assertEqual(model.numPartitions, 2)
+        self.assertEqual(model.numIterations, 10)
+        self.assertEqual(model.seed, 1024)
+        self.assertEqual(model.minCount, 3)
 
     def test_word2vec_get_vectors(self):
         data = [
@@ -1002,7 +1002,7 @@ class Word2VecTests(MLlibTestCase):
             ["a"]
         ]
         model = Word2Vec().fit(self.sc.parallelize(data))
-        self.assertEquals(len(model.getVectors()), 3)
+        self.assertEqual(len(model.getVectors()), 3)
 
 
 class StandardScalerTests(MLlibTestCase):
@@ -1044,8 +1044,8 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
         """Test that the model params are set correctly"""
         stkm = StreamingKMeans()
         stkm.setK(5).setDecayFactor(0.0)
-        self.assertEquals(stkm._k, 5)
-        self.assertEquals(stkm._decayFactor, 0.0)
+        self.assertEqual(stkm._k, 5)
+        self.assertEqual(stkm._decayFactor, 0.0)
 
         # Model not set yet.
         self.assertIsNone(stkm.latestModel())
@@ -1053,9 +1053,9 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
 
         stkm.setInitialCenters(
             centers=[[0.0, 0.0], [1.0, 1.0]], weights=[1.0, 1.0])
-        self.assertEquals(
+        self.assertEqual(
             stkm.latestModel().centers, [[0.0, 0.0], [1.0, 1.0]])
-        self.assertEquals(stkm.latestModel().clusterWeights, [1.0, 1.0])
+        self.assertEqual(stkm.latestModel().clusterWeights, [1.0, 1.0])
 
     def test_accuracy_for_single_center(self):
         """Test that parameters obtained are correct for a single center."""
@@ -1070,7 +1070,7 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
         self.ssc.start()
 
         def condition():
-            self.assertEquals(stkm.latestModel().clusterWeights, [25.0])
+            self.assertEqual(stkm.latestModel().clusterWeights, [25.0])
             return True
         self._eventually(condition, catch_assertions=True)
 
@@ -1114,7 +1114,7 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
         def condition():
             finalModel = stkm.latestModel()
             self.assertTrue(all(finalModel.centers == array(initCenters)))
-            self.assertEquals(finalModel.clusterWeights, [5.0, 5.0, 5.0, 5.0])
+            self.assertEqual(finalModel.clusterWeights, [5.0, 5.0, 5.0, 5.0])
             return True
         self._eventually(condition, catch_assertions=True)
 
@@ -1141,7 +1141,7 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
         self.ssc.start()
 
         def condition():
-            self.assertEquals(result, [[0], [1], [2], [3]])
+            self.assertEqual(result, [[0], [1], [2], [3]])
             return True
 
         self._eventually(condition, catch_assertions=True)
@@ -1263,7 +1263,7 @@ class StreamingLogisticRegressionWithSGDTests(MLLibStreamingTestCase):
         self.ssc.start()
 
         def condition():
-            self.assertEquals(len(models), len(input_batches))
+            self.assertEqual(len(models), len(input_batches))
             return True
 
         # We want all batches to finish for this test.
@@ -1297,7 +1297,7 @@ class StreamingLogisticRegressionWithSGDTests(MLLibStreamingTestCase):
         self.ssc.start()
 
         def condition():
-            self.assertEquals(len(true_predicted), len(input_batches))
+            self.assertEqual(len(true_predicted), len(input_batches))
             return True
 
         self._eventually(condition, catch_assertions=True)
@@ -1400,7 +1400,7 @@ class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
         self.ssc.start()
 
         def condition():
-            self.assertEquals(len(model_weights), len(batches))
+            self.assertEqual(len(model_weights), len(batches))
             return True
 
         # We want all batches to finish for this test.
@@ -1433,7 +1433,7 @@ class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
         self.ssc.start()
 
         def condition():
-            self.assertEquals(len(samples), len(batches))
+            self.assertEqual(len(samples), len(batches))
             return True
 
         # We want all batches to finish for this test.

http://git-wip-us.apache.org/repos/asf/spark/blob/35e8ab93/python/pyspark/sql/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index f2172b7..3e680f1 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -157,7 +157,7 @@ class DataTypeTests(unittest.TestCase):
     def test_data_type_eq(self):
         lt = LongType()
         lt2 = pickle.loads(pickle.dumps(LongType()))
-        self.assertEquals(lt, lt2)
+        self.assertEqual(lt, lt2)
 
     # regression test for SPARK-7978
     def test_decimal_type(self):
@@ -393,7 +393,7 @@ class SQLTests(ReusedPySparkTestCase):
                                    CustomRow(field1=2, field2="row2"),
                                    CustomRow(field1=3, field2="row3")])
         df = self.sqlCtx.inferSchema(rdd)
-        self.assertEquals(Row(field1=1, field2=u'row1'), df.first())
+        self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
 
     def test_create_dataframe_from_objects(self):
         data = [MyObject(1, "1"), MyObject(2, "2")]
@@ -403,7 +403,7 @@ class SQLTests(ReusedPySparkTestCase):
 
     def test_select_null_literal(self):
         df = self.sqlCtx.sql("select null as col")
-        self.assertEquals(Row(col=None), df.first())
+        self.assertEqual(Row(col=None), df.first())
 
     def test_apply_schema(self):
         from datetime import date, datetime
@@ -519,14 +519,14 @@ class SQLTests(ReusedPySparkTestCase):
                              StructField("point", ExamplePointUDT(), False)])
         df = self.sqlCtx.createDataFrame([row], schema)
         point = df.head().point
-        self.assertEquals(point, ExamplePoint(1.0, 2.0))
+        self.assertEqual(point, ExamplePoint(1.0, 2.0))
 
         row = (1.0, PythonOnlyPoint(1.0, 2.0))
         schema = StructType([StructField("label", DoubleType(), False),
                              StructField("point", PythonOnlyUDT(), False)])
         df = self.sqlCtx.createDataFrame([row], schema)
         point = df.head().point
-        self.assertEquals(point, PythonOnlyPoint(1.0, 2.0))
+        self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
 
     def test_udf_with_udt(self):
         from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
@@ -554,14 +554,14 @@ class SQLTests(ReusedPySparkTestCase):
         df0.write.parquet(output_dir)
         df1 = self.sqlCtx.parquetFile(output_dir)
         point = df1.head().point
-        self.assertEquals(point, ExamplePoint(1.0, 2.0))
+        self.assertEqual(point, ExamplePoint(1.0, 2.0))
 
         row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
         df0 = self.sqlCtx.createDataFrame([row])
         df0.write.parquet(output_dir, mode='overwrite')
         df1 = self.sqlCtx.parquetFile(output_dir)
         point = df1.head().point
-        self.assertEquals(point, PythonOnlyPoint(1.0, 2.0))
+        self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
 
     def test_column_operators(self):
         ci = self.df.key
@@ -826,8 +826,8 @@ class SQLTests(ReusedPySparkTestCase):
         output_dir = os.path.join(self.tempdir.name, "infer_long_type")
         df.saveAsParquetFile(output_dir)
         df1 = self.sqlCtx.parquetFile(output_dir)
-        self.assertEquals('a', df1.first().f1)
-        self.assertEquals(100000000000000, df1.first().f2)
+        self.assertEqual('a', df1.first().f1)
+        self.assertEqual(100000000000000, df1.first().f2)
 
         self.assertEqual(_infer_type(1), LongType())
         self.assertEqual(_infer_type(2**10), LongType())

http://git-wip-us.apache.org/repos/asf/spark/blob/35e8ab93/python/pyspark/streaming/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/streaming/tests.py b/python/pyspark/streaming/tests.py
index cfea95b..e4e56ff 100644
--- a/python/pyspark/streaming/tests.py
+++ b/python/pyspark/streaming/tests.py
@@ -693,7 +693,7 @@ class CheckpointTests(unittest.TestCase):
 
         # Verify that getActiveOrCreate() returns active context
         self.setupCalled = False
-        self.assertEquals(StreamingContext.getActiveOrCreate(self.cpd, setup), self.ssc)
+        self.assertEqual(StreamingContext.getActiveOrCreate(self.cpd, setup), self.ssc)
         self.assertFalse(self.setupCalled)
 
         # Verify that getActiveOrCreate() uses existing SparkContext

