Skip to content

Commit 51c7a9c

Browse files
author
Sumedh Wale
committed
fixing unit test failures
1 parent e9526ee commit 51c7a9c

7 files changed

Lines changed: 73 additions & 72 deletions

File tree

cluster/src/dunit/scala/io/snappydata/cluster/SplitSnappyClusterDUnitTest.scala

Lines changed: 57 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,15 @@ import scala.concurrent.{Await, Future}
2626
import scala.language.postfixOps
2727
import scala.reflect.io.Path
2828
import scala.util.{Failure, Success, Try}
29+
2930
import com.gemstone.gemfire.internal.cache.PartitionedRegion
3031
import com.pivotal.gemfirexd.internal.engine.Misc
3132
import io.snappydata.core.{TestData, TestData2}
32-
import io.snappydata.test.dunit.{AvailablePortHelper, SerializableRunnable}
33+
import io.snappydata.test.dunit.{AvailablePortHelper, DistributedTestBase, SerializableRunnable}
3334
import io.snappydata.util.TestUtils
3435
import io.snappydata.{ColumnUpdateDeleteTests, ConcurrentOpsTests, Property, SnappyTableStatsProviderService}
3536
import org.junit.Assert
37+
3638
import org.apache.spark.rdd.RDD
3739
import org.apache.spark.sql._
3840
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
@@ -254,7 +256,7 @@ class SplitSnappyClusterDUnitTest(s: String)
254256
if (jars.count() > 0) {
255257
var str = msg
256258
jars.collect().foreach(x => str += s"$x,")
257-
assert(false, str)
259+
assert(assertion = false, str)
258260
}
259261
}
260262

@@ -400,12 +402,12 @@ class SplitSnappyClusterDUnitTest(s: String)
400402

401403
try {
402404
// wait till the smart connector job perform at-least one putInto operation
403-
var count = 0
404-
while (snc.table("T5").count() == 0 && count < 10) {
405-
Thread.sleep(4000)
406-
count += 1
407-
}
408-
assert(count != 10, "Smart connector application not performing putInto as expected.")
405+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
406+
407+
override def description(): String = "waiting for putInto from smart connector"
408+
409+
override def done(): Boolean = snc.table("T5").count() > 0
410+
}, 60000, 500, true)
409411

410412
// perform DDL
411413
snc.sql(s"CREATE TABLE T6(COL1 STRING, COL2 STRING) " +
@@ -435,12 +437,12 @@ class SplitSnappyClusterDUnitTest(s: String)
435437
startArgs :+ Int.box(locatorClientPort) :+ testTempDirectory)
436438
}
437439
try {
438-
var attempts = 0
439-
while (!Files.exists(Paths.get(testTempDirectory, "file0")) && attempts < 15) {
440-
Thread.sleep(4000)
441-
attempts += 1
442-
}
443-
assert(attempts < 14, "No data ingested by streaming application.")
440+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
441+
442+
override def description(): String = "no data ingested by streaming application"
443+
444+
override def done(): Boolean = Files.exists(Paths.get(testTempDirectory, "file0"))
445+
}, 60000, 500, true)
444446

445447
// perform DDL leading to stale catalog in smart connector application
446448
snc.sql(s"CREATE TABLE SYNC_TABLE(COL1 STRING) " + s"USING column")
@@ -470,12 +472,12 @@ class SplitSnappyClusterDUnitTest(s: String)
470472

471473
try {
472474
// wait till the smart connector job perform at-least one putInto operation
473-
var count = 0
474-
while (snc.table("T5").count() == 3 && count < 10) {
475-
Thread.sleep(4000)
476-
count += 1
477-
}
478-
assert(count != 10, "Smart connector application not performing putInto as expected.")
475+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
476+
477+
override def description(): String = "waiting for putInto from smart connector"
478+
479+
override def done(): Boolean = snc.table("T5").count() != 3
480+
}, 60000, 500, true)
479481

480482
// perform DDL
481483
snc.sql(s"CREATE TABLE T6(COL1 STRING, COL2 STRING) " +
@@ -504,12 +506,12 @@ class SplitSnappyClusterDUnitTest(s: String)
504506

505507
try {
506508
// wait till the smart connector job perform at-least one putInto operation
507-
var count = 0
508-
while (snc.table("T5").count() == 3 && count < 10) {
509-
Thread.sleep(4000)
510-
count += 1
511-
}
512-
assert(count != 10, "Smart connector application not performing putInto as expected.")
509+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
510+
511+
override def description(): String = "waiting for putInto from smart connector"
512+
513+
override def done(): Boolean = snc.table("T5").count() != 3
514+
}, 60000, 500, true)
513515

514516
// rebalance the buckets
515517
snc.sql(s"CALL SYS.REBALANCE_ALL_BUCKETS()")
@@ -551,12 +553,12 @@ class SplitSnappyClusterDUnitTest(s: String)
551553

552554
try {
553555
// wait till the smart connector job perform at-least one putInto operation
554-
var count = 0
555-
while (snc.table("T5").count() == 3 && count < 10) {
556-
Thread.sleep(4000)
557-
count += 1
558-
}
559-
assert(count != 10, "Smart connector application not performing insert as expected.")
556+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
557+
558+
override def description(): String = "waiting for insertInto from smart connector"
559+
560+
override def done(): Boolean = snc.table("T5").count() != 3
561+
}, 60000, 500, true)
560562

561563
logInfo("testInsertQueryAfterStaleCatalog dropping table t5")
562564
// drop the table and create a table with same name and different schema
@@ -592,12 +594,12 @@ class SplitSnappyClusterDUnitTest(s: String)
592594

593595
try {
594596
// wait till the smart connector job perform at-least one putInto operation
595-
var count = 0
596-
while (snc.table("T6").count() == 3 && count < 10) {
597-
Thread.sleep(4000)
598-
count += 1
599-
}
600-
assert(count != 10, "Smart connector application not performing delete as expected.")
597+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
598+
599+
override def description(): String = "waiting for delete from smart connector"
600+
601+
override def done(): Boolean = snc.table("T6").count() != 3
602+
}, 60000, 500, true)
601603

602604
logInfo("testDeleteAfterStaleCatalog dropping table t6")
603605
snc.sql("drop table t6")
@@ -628,12 +630,12 @@ class SplitSnappyClusterDUnitTest(s: String)
628630

629631
try {
630632
// wait till the smart connector job perform at-least one putInto operation
631-
var count = 0
632-
while (snc.table("T7").count() == 3 && count < 10) {
633-
Thread.sleep(4000)
634-
count += 1
635-
}
636-
assert(count != 10, "Smart connector application not performing delete as expected.")
633+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
634+
635+
override def description(): String = "waiting for delete from smart connector"
636+
637+
override def done(): Boolean = snc.table("T7").count() != 3
638+
}, 60000, 500, true)
637639

638640
snc.sql(s"CREATE TABLE T8(COL1 DATE, COL2 DATE) USING column OPTIONS" +
639641
s" (key_columns 'COL1', PARTITION_BY 'COL1', COLUMN_MAX_DELTA_ROWS '1')")
@@ -665,7 +667,7 @@ object SplitSnappyClusterDUnitTest
665667
s"cached Hive catalog")
666668
} catch {
667669
// expected exception
668-
case _: org.apache.spark.sql.TableNotFoundException =>
670+
case _: AnalysisException =>
669671
}
670672
}
671673

@@ -1289,7 +1291,7 @@ object SplitSnappyClusterDUnitTest
12891291
Thread.sleep(6000)
12901292
try {
12911293
for (_ <- 1 to 20) {
1292-
Thread.sleep(500)
1294+
Thread.sleep(200)
12931295
logInfo("calling dataFrame.write.insertInto(\"T5\")")
12941296
logInfo("2. schema is = " + snc.table("T5").schema)
12951297
dataFrame2.write.insertInto("T5")
@@ -1315,9 +1317,8 @@ object SplitSnappyClusterDUnitTest
13151317
success = true
13161318
} catch {
13171319
// if table is not created yet on embedded cluster,
1318-
// TableNotFoundException can be seen; retry in
1319-
// such a case
1320-
case t: TableNotFoundException =>
1320+
// table may not be found; retry in such a case
1321+
case t: AnalysisException =>
13211322
retryCount = retryCount + 1
13221323
if (retryCount == maxRetryAttempts) {
13231324
throw t
@@ -1338,7 +1339,7 @@ object SplitSnappyClusterDUnitTest
13381339
Thread.sleep(6000)
13391340
try {
13401341
for (_ <- 1 to 20) {
1341-
Thread.sleep(500)
1342+
Thread.sleep(200)
13421343
snc.sql("delete from t6 where col1 like '2%'")
13431344
}
13441345
Assert.fail("Should have thrown CatalogStaleException.")
@@ -1362,7 +1363,7 @@ object SplitSnappyClusterDUnitTest
13621363
Thread.sleep(6000)
13631364
try {
13641365
for (_ <- 1 to 20) {
1365-
Thread.sleep(500)
1366+
Thread.sleep(200)
13661367
snc.sql("update t7 set col2 = '22' where col1 = '2'")
13671368
}
13681369
Assert.fail("Should have thrown CatalogStaleException.")
@@ -1424,13 +1425,12 @@ object SplitSnappyClusterDUnitTest
14241425
}
14251426

14261427
// wait till DDL is fired on snappy cluster which will lead to stale smart-connector catalog
1427-
var attempts = 0
1428-
while (!Files.exists(Paths.get(testTempDir, "file1")) && attempts < 15) {
1429-
Thread.sleep(4000)
1430-
attempts += 1
1431-
}
1428+
DistributedTestBase.waitForCriterion(new DistributedTestBase.WaitCriterion {
1429+
1430+
override def description(): String = "waiting for stale catalog timed out"
14321431

1433-
assert(attempts < 14, "Waiting for stale catalog timed out")
1432+
override def done(): Boolean = Files.exists(Paths.get(testTempDir, "file1"))
1433+
}, 60000, 500, true)
14341434

14351435
// produce second batch of data
14361436
val dataBatch2 = Seq(Seq(3, "name3", 20))

cluster/src/dunit/scala/io/snappydata/externalstore/CatalogConsistencyDUnitTest.scala

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ import io.snappydata.test.dunit.AvailablePortHelper
2323

2424
import org.apache.spark.sql.execution.columnar.impl.ColumnFormatRelation
2525
import org.apache.spark.sql.sources.JdbcExtendedUtils
26-
import org.apache.spark.sql.{AnalysisException, SaveMode, SnappyContext, TableNotFoundException}
26+
import org.apache.spark.sql.{AnalysisException, SaveMode, SnappyContext}
2727

2828
/**
2929
* Some basic tests to detect catalog inconsistency and repair it
@@ -71,8 +71,7 @@ class CatalogConsistencyDUnitTest(s: String) extends ClusterManagerTestBase(s) {
7171
snc.snappySession.sessionCatalog.lookupRelation(
7272
snc.snappySession.tableIdentifier("column_table1"))
7373
} catch {
74-
case t: TableNotFoundException => // expected exception
75-
case unknown: Throwable => throw unknown
74+
case _: AnalysisException => // expected exception
7675
}
7776

7877
val routeQueryDisabledConn = getClientConnection(netPort1, false)
@@ -115,8 +114,7 @@ class CatalogConsistencyDUnitTest(s: String) extends ClusterManagerTestBase(s) {
115114
snc.snappySession.sessionCatalog.lookupRelation(
116115
snc.snappySession.tableIdentifier("column_table1"))
117116
} catch {
118-
case t: TableNotFoundException => // expected exception
119-
case unknown: Throwable => throw unknown
117+
case _: AnalysisException => // expected exception
120118
}
121119

122120
val connection = getClientConnection(netPort1)

cluster/src/test/scala/org/apache/spark/memory/SnappyMemoryAccountingSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -586,7 +586,7 @@ class SnappyMemoryAccountingSuite extends MemoryFunSuite {
586586
}
587587

588588
// wait a lot
589-
awaitAll(20000000L, tasks: _*)
589+
awaitAll(200000L, tasks: _*)
590590

591591
// Rough estimation of 120 bytes per row
592592
assert(SparkEnv.get.memoryManager.storageMemoryUsed >= 120 * 100 * 5 )

core/src/dunit/scala/io/snappydata/cluster/SplitClusterDUnitSecurityTest.scala

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ import org.apache.commons.io.FileUtils
3636

3737
import org.apache.spark.SparkUtilsAccess
3838
import org.apache.spark.sql.types.{IntegerType, StructField}
39-
import org.apache.spark.sql.{ParseException, Row, SnappyContext, SnappySession, TableNotFoundException}
39+
import org.apache.spark.sql.{AnalysisException, ParseException, Row, SnappyContext, SnappySession}
4040

4141
class SplitClusterDUnitSecurityTest(s: String)
4242
extends DistributedTestBase(s)
@@ -382,9 +382,9 @@ class SplitClusterDUnitSecurityTest(s: String)
382382
private def assertTableDeleted(func: () => Unit, t: String): Unit = {
383383
try {
384384
func()
385-
assert(false, s"Failed to drop $t")
385+
assert(assertion = false, s"Failed to drop $t")
386386
} catch {
387-
case te: TableNotFoundException =>
387+
case _: AnalysisException =>
388388
}
389389
}
390390

@@ -456,7 +456,7 @@ class SplitClusterDUnitSecurityTest(s: String)
456456

457457
// All DMLs from another user should fail
458458
def assertFailure(sql: () => Unit, s: String): Unit = {
459-
val states = Seq("42502", "42500")
459+
val states = Seq("42502", "42500", "does not have INSERT permission on table")
460460
assertFailures(sql, s, states)
461461
}
462462

core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
*/
1717
package org.apache.spark.sql.store
1818

19-
import java.sql.{DriverManager, SQLException}
19+
import java.sql.DriverManager
2020

2121
import scala.util.{Failure, Success, Try}
2222

@@ -182,8 +182,11 @@ class ColumnTableTest
182182
"using column options()")
183183
try {
184184
snc.sql("insert into coltab values (1, 2)")
185+
fail("expected insert to fail")
185186
} catch {
186-
case ex: SQLException => assert("42802".equals(ex.getSQLState))
187+
// check expected exception message
188+
case ae: AnalysisException if ae.getMessage.contains(
189+
"the number of columns are different: need 1 columns, but query has 2 columns.") =>
187190
}
188191
snc.sql("drop table coltab")
189192
}

core/src/test/scala/org/apache/spark/sql/store/MetadataTest.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ import org.scalatest.Assertions
2828
import org.apache.spark.sql.collection.Utils
2929
import org.apache.spark.sql.execution.columnar.impl.ColumnPartitionResolver
3030
import org.apache.spark.sql.types._
31-
import org.apache.spark.sql.{AnalysisException, Dataset, Row, TableNotFoundException}
31+
import org.apache.spark.sql.{AnalysisException, Dataset, Row}
3232

3333
/**
3434
* Tests for meta-data queries using Spark SQL.
@@ -446,7 +446,7 @@ object MetadataTest extends Assertions {
446446
rs = executeSQL("show columns in sysTables from app").collect()
447447
fail("Expected error due to non-existent table")
448448
} catch {
449-
case _: TableNotFoundException => // expected
449+
case _: AnalysisException => // expected
450450
case se: SQLException if se.getSQLState == "42000" => // expected
451451
}
452452
try {

store

Submodule store updated from ba7e579 to be15387

0 commit comments

Comments (0)