@@ -67,9 +67,9 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
 
 class MemoryStreamCapture[A: Encoder](
     id: Int,
-    sqlContext: SQLContext,
+    sparkSession: SparkSession,
     numPartitions: Option[Int] = None)
-  extends MemoryStream[A](id, sqlContext, numPartitions = numPartitions) {
+  extends MemoryStream[A](id, sparkSession, numPartitions = numPartitions) {
 
   val commits = new ListBuffer[streaming.Offset]()
   val commitThreads = new ListBuffer[Thread]()
@@ -136,7 +136,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   test("async WAL commits recovery") {
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
 
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDF()
 
     var index = 0
@@ -204,7 +204,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("async WAL commits turn on and off") {
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDS()
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
@@ -308,7 +308,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("Fail with once trigger") {
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDF()
 
     val e = intercept[IllegalArgumentException] {
@@ -323,7 +323,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
 
   test("Fail with available now trigger") {
 
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDF()
 
     val e = intercept[IllegalArgumentException] {
@@ -339,7 +339,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   test("switching between async wal commit enabled and trigger once") {
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
 
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDF()
 
     var index = 0
@@ -500,7 +500,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   test("switching between async wal commit enabled and available now") {
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
 
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDF()
 
     var index = 0
@@ -669,7 +669,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   def testAsyncWriteErrorsAlreadyExists(path: String): Unit = {
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDS()
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
 
@@ -720,7 +720,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   def testAsyncWriteErrorsPermissionsIssue(path: String): Unit = {
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDS()
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
     val commitDir = new File(checkpointLocation + path)
@@ -778,7 +778,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
 
-    val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStreamCapture[Int](id = 0, spark)
 
     val ds = inputData.toDF()
 
@@ -852,7 +852,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("interval commits and recovery") {
-    val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStreamCapture[Int](id = 0, spark)
     val ds = inputData.toDS()
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
@@ -934,7 +934,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("recovery when first offset is not zero and not commit log entries") {
-    val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStreamCapture[Int](id = 0, spark)
     val ds = inputData.toDS()
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
@@ -961,7 +961,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
     /**
      * start new stream
      */
-    val inputData2 = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData2 = new MemoryStreamCapture[Int](id = 0, spark)
     val ds2 = inputData2.toDS()
     testStream(ds2, extraOptions = Map(
       ASYNC_PROGRESS_TRACKING_ENABLED -> "true",
@@ -995,7 +995,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("recovery non-contiguous log") {
-    val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStreamCapture[Int](id = 0, spark)
     val ds = inputData.toDS()
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
@@ -1088,7 +1088,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("Fail on pipelines using unsupported sinks") {
-    val inputData = new MemoryStream[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStream[Int](id = 0, spark)
     val ds = inputData.toDF()
 
     val e = intercept[IllegalArgumentException] {
@@ -1109,7 +1109,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
 
     withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2", SQLConf.ASYNC_LOG_PURGE.key -> "false") {
       withTempDir { checkpointLocation =>
-        val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+        val inputData = new MemoryStreamCapture[Int](id = 0, spark)
         val ds = inputData.toDS()
 
         val clock = new StreamManualClock
@@ -1243,7 +1243,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   test("with async log purging") {
     withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2", SQLConf.ASYNC_LOG_PURGE.key -> "true") {
       withTempDir { checkpointLocation =>
-        val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+        val inputData = new MemoryStreamCapture[Int](id = 0, spark)
         val ds = inputData.toDS()
 
         val clock = new StreamManualClock
@@ -1381,7 +1381,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("test multiple gaps in offset and commit logs") {
-    val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStreamCapture[Int](id = 0, spark)
     val ds = inputData.toDS()
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
@@ -1427,7 +1427,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
     /**
      * start new stream
      */
-    val inputData2 = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData2 = new MemoryStreamCapture[Int](id = 0, spark)
     val ds2 = inputData2.toDS()
     testStream(ds2, extraOptions = Map(
       ASYNC_PROGRESS_TRACKING_ENABLED -> "true",
@@ -1460,7 +1460,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
   }
 
   test("recovery when gaps exist in offset and commit log") {
-    val inputData = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData = new MemoryStreamCapture[Int](id = 0, spark)
     val ds = inputData.toDS()
 
     val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
@@ -1494,7 +1494,7 @@ class AsyncProgressTrackingMicroBatchExecutionSuite
     /**
      * start new stream
      */
-    val inputData2 = new MemoryStreamCapture[Int](id = 0, sqlContext = sqlContext)
+    val inputData2 = new MemoryStreamCapture[Int](id = 0, spark)
     val ds2 = inputData2.toDS()
     testStream(ds2, extraOptions = Map(
       ASYNC_PROGRESS_TRACKING_ENABLED -> "true",
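For readers skimming the hunks above: every change applies the same pattern, constructing `MemoryStream` (and the suite's `MemoryStreamCapture` subclass) from the test's `SparkSession` (`spark`) instead of its `SQLContext`. Below is a minimal sketch of that call-site migration, not part of the patch; it assumes the `MemoryStream` test source from Spark's sql/core module (the import path shown is the usual one but may differ across Spark versions) and a local session standing in for the suite's `spark`.

```scala
import org.apache.spark.sql.SparkSession
// Assumed import path for the MemoryStream test source; may vary by Spark version.
import org.apache.spark.sql.execution.streaming.MemoryStream

object MemoryStreamMigrationSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").getOrCreate()
    import spark.implicits._ // supplies the Encoder[Int] the constructor needs

    // Before this patch, call sites passed the SQLContext:
    //   new MemoryStream[Int](id = 0, sqlContext = spark.sqlContext)
    // After it, they pass the SparkSession positionally, as in every hunk above:
    val inputData = new MemoryStream[Int](id = 0, spark)

    inputData.addData(1, 2, 3)   // enqueue one micro-batch of input
    val ds = inputData.toDS()    // streaming Dataset[Int] over the source
    println(ds.isStreaming)      // true
    spark.stop()
  }
}
```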