 import com.facebook.presto.hive.HiveHdfsConfiguration;
 import com.facebook.presto.hive.MetastoreClientConfig;
 import com.facebook.presto.hive.authentication.NoHdfsAuthentication;
+import com.facebook.presto.hive.s3.HiveS3Config;
+import com.facebook.presto.hive.s3.PrestoS3ConfigurationUpdater;
+import com.facebook.presto.hive.s3.S3ConfigurationUpdater;
 import com.facebook.presto.iceberg.delete.DeleteFile;
 import com.facebook.presto.metadata.CatalogMetadata;
 import com.facebook.presto.metadata.Metadata;
@@ ... @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.iceberg.BaseTable;
 import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.FileScanTask;
@@ ... @@
 import java.lang.reflect.Field;
 import java.net.URI;
 import java.nio.ByteBuffer;
-import java.nio.file.Path;
 import java.time.LocalDateTime;
 import java.time.LocalTime;
 import java.time.format.DateTimeFormatter;
@@ -1679,14 +1682,14 @@ public void testMetadataVersionsMaintainingProperties()
             // Table `test_table_with_default_setting_properties`'s current metadata record all 5 previous metadata files
             assertEquals(defaultTableMetadata.previousFiles().size(), 5);

-            FileSystem fileSystem = getHdfsEnvironment().getFileSystem(new HdfsContext(SESSION), new org.apache.hadoop.fs.Path(settingTable.location()));
+            FileSystem fileSystem = getHdfsEnvironment().getFileSystem(new HdfsContext(SESSION), new Path(settingTable.location()));

             // Table `test_table_with_setting_properties`'s all existing metadata files count is 2
-            FileStatus[] settingTableFiles = fileSystem.listStatus(new org.apache.hadoop.fs.Path(settingTable.location(), "metadata"), name -> name.getName().contains(METADATA_FILE_EXTENSION));
+            FileStatus[] settingTableFiles = fileSystem.listStatus(new Path(settingTable.location(), "metadata"), name -> name.getName().contains(METADATA_FILE_EXTENSION));
             assertEquals(settingTableFiles.length, 2);

             // Table `test_table_with_default_setting_properties`'s all existing metadata files count is 6
-            FileStatus[] defaultTableFiles = fileSystem.listStatus(new org.apache.hadoop.fs.Path(defaultTable.location(), "metadata"), name -> name.getName().contains(METADATA_FILE_EXTENSION));
+            FileStatus[] defaultTableFiles = fileSystem.listStatus(new Path(defaultTable.location(), "metadata"), name -> name.getName().contains(METADATA_FILE_EXTENSION));
             assertEquals(defaultTableFiles.length, 6);
         }
         finally {
@@ -2261,12 +2264,12 @@ private void testCheckDeleteFiles(Table icebergTable, int expectedSize, List<Fil
     private void writePositionDeleteToNationTable(Table icebergTable, String dataFilePath, long deletePos)
             throws IOException
     {
-        Path dataDirectory = getDistributedQueryRunner().getCoordinator().getDataDirectory();
+        java.nio.file.Path dataDirectory = getDistributedQueryRunner().getCoordinator().getDataDirectory();
         File metastoreDir = getIcebergDataDirectoryPath(dataDirectory, catalogType.name(), new IcebergConfig().getFileFormat(), false).toFile();
-        org.apache.hadoop.fs.Path metadataDir = new org.apache.hadoop.fs.Path(metastoreDir.toURI());
+        Path metadataDir = new Path(metastoreDir.toURI());
         String deleteFileName = "delete_file_" + randomUUID();
         FileSystem fs = getHdfsEnvironment().getFileSystem(new HdfsContext(SESSION), metadataDir);
-        org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(metadataDir, deleteFileName);
+        Path path = new Path(metadataDir, deleteFileName);
         PositionDeleteWriter<Record> writer = Parquet.writeDeletes(HadoopOutputFile.fromPath(path, fs))
                 .createWriterFunc(GenericParquetWriter::buildWriter)
                 .forTable(icebergTable)
@@ -2293,13 +2296,13 @@ private void writeEqualityDeleteToNationTable(Table icebergTable, Map<String, Ob
     private void writeEqualityDeleteToNationTable(Table icebergTable, Map<String, Object> overwriteValues, Map<String, Object> partitionValues)
             throws Exception
     {
-        Path dataDirectory = getDistributedQueryRunner().getCoordinator().getDataDirectory();
+        java.nio.file.Path dataDirectory = getDistributedQueryRunner().getCoordinator().getDataDirectory();
         File metastoreDir = getIcebergDataDirectoryPath(dataDirectory, catalogType.name(), new IcebergConfig().getFileFormat(), false).toFile();
-        org.apache.hadoop.fs.Path metadataDir = new org.apache.hadoop.fs.Path(metastoreDir.toURI());
+        Path metadataDir = new Path(metastoreDir.toURI());
         String deleteFileName = "delete_file_" + randomUUID();
         FileSystem fs = getHdfsEnvironment().getFileSystem(new HdfsContext(SESSION), metadataDir);
         Schema deleteRowSchema = icebergTable.schema().select(overwriteValues.keySet());
-        Parquet.DeleteWriteBuilder writerBuilder = Parquet.writeDeletes(HadoopOutputFile.fromPath(new org.apache.hadoop.fs.Path(metadataDir, deleteFileName), fs))
+        Parquet.DeleteWriteBuilder writerBuilder = Parquet.writeDeletes(HadoopOutputFile.fromPath(new Path(metadataDir, deleteFileName), fs))
                 .forTable(icebergTable)
                 .rowSchema(deleteRowSchema)
                 .createWriterFunc(GenericParquetWriter::buildWriter)
@@ -2320,13 +2323,19 @@ private void writeEqualityDeleteToNationTable(Table icebergTable, Map<String, Ob
         icebergTable.newRowDelta().addDeletes(writer.toDeleteFile()).commit();
     }

-    public static HdfsEnvironment getHdfsEnvironment()
+    protected HdfsEnvironment getHdfsEnvironment()
     {
         HiveClientConfig hiveClientConfig = new HiveClientConfig();
         MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
-        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig),
-                ImmutableSet.of(),
-                hiveClientConfig);
+        HiveS3Config hiveS3Config = new HiveS3Config();
+        return getHdfsEnvironment(hiveClientConfig, metastoreClientConfig, hiveS3Config);
+    }
+
+    public static HdfsEnvironment getHdfsEnvironment(HiveClientConfig hiveClientConfig, MetastoreClientConfig metastoreClientConfig, HiveS3Config hiveS3Config)
+    {
+        S3ConfigurationUpdater s3ConfigurationUpdater = new PrestoS3ConfigurationUpdater(hiveS3Config);
+        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig, s3ConfigurationUpdater, ignored -> {}),
+                ImmutableSet.of(), hiveClientConfig);
         return new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
     }

@@ -2348,18 +2357,18 @@ protected Table loadTable(String tableName)

     protected Map<String, String> getProperties()
     {
-        File metastoreDir = getCatalogDirectory();
+        Path metastoreDir = getCatalogDirectory();
         return ImmutableMap.of("warehouse", metastoreDir.toString());
     }

-    protected File getCatalogDirectory()
+    protected Path getCatalogDirectory()
     {
-        Path dataDirectory = getDistributedQueryRunner().getCoordinator().getDataDirectory();
+        java.nio.file.Path dataDirectory = getDistributedQueryRunner().getCoordinator().getDataDirectory();
         switch (catalogType) {
             case HIVE:
             case HADOOP:
             case NESSIE:
-                return getIcebergDataDirectoryPath(dataDirectory, catalogType.name(), new IcebergConfig().getFileFormat(), false).toFile();
+                return new Path(getIcebergDataDirectoryPath(dataDirectory, catalogType.name(), new IcebergConfig().getFileFormat(), false).toFile().toURI());
         }

         throw new PrestoException(NOT_SUPPORTED, "Unsupported Presto Iceberg catalog type " + catalogType);
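
For reviewers, a minimal usage sketch of the refactored helper: the new static getHdfsEnvironment(HiveClientConfig, MetastoreClientConfig, HiveS3Config) overload lets a caller supply S3 settings that PrestoS3ConfigurationUpdater applies to the Hadoop Configuration before the HdfsEnvironment is built. The method name openS3FileSystemForTest, the endpoint, credentials, and bucket path below are hypothetical placeholders; only the helper's signature comes from this change.

    // Sketch only (not part of this diff): build an HdfsEnvironment with S3 settings
    // via the new overload, then open a FileSystem for an S3 location.
    private FileSystem openS3FileSystemForTest()
            throws IOException
    {
        // Placeholder endpoint and credentials, e.g. a local MinIO instance used in tests
        HiveS3Config s3Config = new HiveS3Config()
                .setS3Endpoint("http://127.0.0.1:9000")
                .setS3PathStyleAccess(true)
                .setS3AwsAccessKey("accessKey")
                .setS3AwsSecretKey("secretKey");

        HdfsEnvironment hdfsEnvironment = getHdfsEnvironment(new HiveClientConfig(), new MetastoreClientConfig(), s3Config);

        // SESSION is the test class's existing ConnectorSession constant; the bucket path is illustrative
        return hdfsEnvironment.getFileSystem(new HdfsContext(SESSION), new Path("s3://test-bucket/warehouse"));
    }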