diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..a3e9482 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,37 @@ +### A CI workflow template that runs linting and python testing +### TODO: Modify as needed or as desired. + +name: Test target-redshift + +on: + pull_request: + types: ["opened", "synchronize", "reopened"] + +jobs: + linter: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: chartboost/ruff-action@v1 + # pytest: + # runs-on: ubuntu-latest + # env: + # GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + # strategy: + # matrix: + # python-version: ["3.8", "3.9", "3.10", "3.11"] + # steps: + # - uses: actions/checkout@v3 + # - name: Set up Python ${{ matrix.python-version }} + # uses: actions/setup-python@v4 + # with: + # python-version: ${{ matrix.python-version }} + # - name: Install Poetry + # run: | + # pip install poetry + # - name: Install dependencies + # run: | + # poetry install + # - name: Test with pytest + # run: | + # poetry run pytest diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 78a70de..0000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,30 +0,0 @@ -### A CI workflow template that runs linting and python testing -### TODO: Modify as needed or as desired. - -name: Test target-redshift - -on: [push] - -jobs: - pytest: - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} - strategy: - matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install Poetry - run: | - pip install poetry - - name: Install dependencies - run: | - poetry install - - name: Test with pytest - run: | - poetry run pytest diff --git a/target_redshift/connector.py b/target_redshift/connector.py index 769625e..6596723 100644 --- a/target_redshift/connector.py +++ b/target_redshift/connector.py @@ -1,32 +1,28 @@ """Common SQL connectors for Streams and Sinks.""" from __future__ import annotations + import typing as t +from contextlib import contextmanager from typing import cast -from contextlib import contextmanager import boto3 - -from singer_sdk.typing import _jsonschema_type_check -from singer_sdk import typing as th +import redshift_connector +from redshift_connector import Cursor from singer_sdk.connectors import SQLConnector from singer_sdk.helpers._typing import get_datelike_property_type -from redshift_connector import Cursor -import redshift_connector - +from singer_sdk.typing import _jsonschema_type_check +from sqlalchemy import DDL, Column, MetaData, Table from sqlalchemy.engine.url import URL -from sqlalchemy_redshift.dialect import SUPER, BIGINT, VARCHAR, DOUBLE_PRECISION +from sqlalchemy.schema import CreateSchema, CreateTable, DropTable from sqlalchemy.types import ( BOOLEAN, DATE, DATETIME, - DECIMAL, TIME, - VARCHAR, + TypeEngine, ) -from sqlalchemy.schema import CreateTable, DropTable, CreateSchema -from sqlalchemy.types import TypeEngine -from sqlalchemy import Table, MetaData, DDL, Column +from sqlalchemy_redshift.dialect import BIGINT, DOUBLE_PRECISION, SUPER, VARCHAR class RedshiftConnector(SQLConnector): @@ -44,6 +40,7 @@ def prepare_schema(self, schema_name: str, cursor: Cursor) -> None: Args: schema_name: The target schema name. + cursor: The database cursor. 
""" schema_exists = self.schema_exists(schema_name) if not schema_exists: @@ -54,11 +51,24 @@ def create_schema(self, schema_name: str, cursor: Cursor) -> None: Args: schema_name: The target schema to create. + cursor: The database cursor. """ cursor.execute(str(CreateSchema(schema_name))) @contextmanager - def _connect_cursor(self) -> t.Iterator[Cursor]: + def connect_cursor(self) -> t.Iterator[Cursor]: + """Connect to a redshift connector cursor. + + Returns: + ------- + t.Iterator[Cursor] + A redshift connector cursor. + + Yields: + ------ + Iterator[t.Iterator[Cursor]] + A redshift connector cursor. + """ user, password = self.get_credentials() with redshift_connector.connect( user=user, @@ -71,14 +81,13 @@ def _connect_cursor(self) -> t.Iterator[Cursor]: yield cursor connection.commit() - def prepare_table( # type: ignore[override] + def prepare_table( # type: ignore[override] # noqa: D417 self, full_table_name: str, schema: dict, primary_keys: t.Sequence[str], cursor: Cursor, - partition_keys: list[str] | None = None, - as_temp_table: bool = False, + as_temp_table: bool = False, # noqa: FBT001, FBT002 ) -> Table: """Adapt target table to provided schema if possible. @@ -87,7 +96,6 @@ def prepare_table( # type: ignore[override] schema: the JSON Schema for the table. primary_keys: list of key properties. connection: the database connection. - partition_keys: list of partition keys. as_temp_table: True to create a temp table. Returns: @@ -116,7 +124,6 @@ def prepare_table( # type: ignore[override] meta=meta, schema=schema, primary_keys=primary_keys, - partition_keys=partition_keys, as_temp_table=as_temp_table, cursor=cursor, ) @@ -149,7 +156,7 @@ def copy_table_structure( full_table_name: str, from_table: Table, cursor: Cursor, - as_temp_table: bool = False, + as_temp_table: bool = False, # noqa: FBT001, FBT002 ) -> Table: """Copy table structure. @@ -157,6 +164,7 @@ def copy_table_structure( full_table_name: the target table name potentially including schema from_table: the source table connection: the database connection. + cursor: A redshift connector cursor. as_temp_table: True to create a temp table. Returns: @@ -165,26 +173,27 @@ def copy_table_structure( _, schema_name, table_name = self.parse_full_table_name(full_table_name) meta = MetaData(schema=schema_name) new_table: Table - columns = [] if self.table_exists(full_table_name=full_table_name): - raise RuntimeError("Table already exists") - for column in from_table.columns: - columns.append(column._copy()) + msg = "Table already exists" + raise RuntimeError(msg) + columns = [column._copy() for column in from_table.columns] # noqa: SLF001 if as_temp_table: new_table = Table(table_name, meta, *columns, prefixes=["TEMPORARY"]) else: new_table = Table(table_name, meta, *columns) - create_table_ddl = str(CreateTable(new_table).compile(dialect=self._engine.dialect)) + create_table_ddl = str( + CreateTable(new_table).compile(dialect=self._engine.dialect) + ) cursor.execute(create_table_ddl) return new_table - def drop_table(self, table: Table, cursor: Cursor): + def drop_table(self, table: Table, cursor: Cursor) -> None: """Drop table data.""" drop_table_ddl = str(DropTable(table).compile(dialect=self._engine.dialect)) cursor.execute(drop_table_ddl) - def to_sql_type(self, jsonschema_type: dict) -> TypeEngine: + def to_sql_type(self, jsonschema_type: dict) -> TypeEngine: # noqa: PLR0911 """Convert JSON Schema type to a SQL type. 
Args: @@ -216,15 +225,14 @@ def to_sql_type(self, jsonschema_type: dict) -> TypeEngine: return VARCHAR(self.default_varchar_length) - def create_empty_table( # type: ignore[override] + def create_empty_table( # type: ignore[override] # noqa: PLR0913 self, table_name: str, meta: MetaData, schema: dict, cursor: Cursor, primary_keys: t.Sequence[str] | None = None, - partition_keys: list[str] | None = None, - as_temp_table: bool = False, + as_temp_table: bool = False, # noqa: FBT001, FBT002 ) -> Table: """Create an empty target table. @@ -249,7 +257,11 @@ def create_empty_table( # type: ignore[override] try: properties: dict = schema["properties"] except KeyError: - raise RuntimeError(f"Schema for table_name: '{table_name}'" f"does not define properties: {schema}") + msg = ( + f"Schema for table_name: '{table_name}'" + f"does not define properties: {schema}" + ) + raise RuntimeError(msg) # noqa: B904 for property_name, property_jsonschema in properties.items(): is_primary_key = property_name in primary_keys @@ -266,7 +278,9 @@ def create_empty_table( # type: ignore[override] else: new_table = Table(table_name, meta, *columns) - create_table_ddl = str(CreateTable(new_table).compile(dialect=self._engine.dialect)) + create_table_ddl = str( + CreateTable(new_table).compile(dialect=self._engine.dialect) + ) cursor.execute(create_table_ddl) return new_table @@ -288,7 +302,9 @@ def prepare_column( column_object: a SQLAlchemy column. optional. """ column_name = column_name.lower().replace(" ", "_") - column_exists = column_object is not None or self.column_exists(full_table_name, column_name) + column_exists = column_object is not None or self.column_exists( + full_table_name, column_name + ) if not column_exists: self._create_empty_column( @@ -321,6 +337,7 @@ def _create_empty_column( full_table_name: The target table name. column_name: The name of the new column. sql_type: SQLAlchemy type engine to be used in creating the new column. + cursor: a database cursor. Raises: NotImplementedError: if adding columns is not supported. @@ -361,7 +378,10 @@ def get_column_add_ddl( # type: ignore[override] column = Column(column_name, column_type) return DDL( - ('ALTER TABLE "%(schema_name)s"."%(table_name)s"' "ADD COLUMN %(column_name)s %(column_type)s"), + ( + 'ALTER TABLE "%(schema_name)s"."%(table_name)s"' + "ADD COLUMN %(column_name)s %(column_type)s" + ), { "schema_name": schema_name, "table_name": table_name, @@ -383,6 +403,7 @@ def _adapt_column_type( full_table_name: The target table name. column_name: The target column name. sql_type: The new SQLAlchemy type. + cursor: a database cursor. Raises: NotImplementedError: if altering columns is not supported. @@ -452,7 +473,10 @@ def get_column_alter_ddl( # type: ignore[override] """ column = Column(column_name, column_type) return DDL( - ('ALTER TABLE "%(schema_name)s"."%(table_name)s"' "ALTER COLUMN %(column_name)s %(column_type)s"), + ( + 'ALTER TABLE "%(schema_name)s"."%(table_name)s"' + "ALTER COLUMN %(column_name)s %(column_type)s" + ), { "schema_name": schema_name, "table_name": table_name, @@ -467,20 +491,17 @@ def get_sqlalchemy_url(self, config: dict) -> str: Args: config: The configuration for the connector. 
""" - if config.get("sqlalchemy_url"): - return cast(str, config["sqlalchemy_url"]) - else: - user, password = self.get_credentials() - sqlalchemy_url = URL.create( - drivername=config["dialect+driver"], - username=user, - password=password, - host=config["host"], - port=config["port"], - database=config["dbname"], - query=self.get_sqlalchemy_query(config), - ) - return cast(str, sqlalchemy_url) + user, password = self.get_credentials() + sqlalchemy_url = URL.create( + drivername="redshift+redshift_connector", + username=user, + password=password, + host=config["host"], + port=config["port"], + database=config["dbname"], + query=self.get_sqlalchemy_query(config), + ) + return cast(str, sqlalchemy_url) def get_sqlalchemy_query(self, config: dict) -> dict: """Get query values to be used for sqlalchemy URL creation. @@ -500,9 +521,9 @@ def get_sqlalchemy_query(self, config: dict) -> dict: return query def get_credentials(self) -> tuple[str, str]: - """Use boto3 to get temporary cluster credentials + """Use boto3 to get temporary cluster credentials. - Returns + Returns: ------- tuple[str, str] username and password @@ -517,6 +538,4 @@ def get_credentials(self) -> tuple[str, str]: AutoCreate=False, ) return response["DbUser"], response["DbPassword"] - else: - return self.config["user"], self.config["password"] - + return self.config["user"], self.config["password"] diff --git a/target_redshift/sinks.py b/target_redshift/sinks.py index 90ad4af..0949e30 100644 --- a/target_redshift/sinks.py +++ b/target_redshift/sinks.py @@ -2,17 +2,16 @@ from __future__ import annotations -import uuid -from singer_sdk.sinks import SQLSink -import os import csv +import os +import uuid +from pathlib import Path +from typing import TYPE_CHECKING, Any, Iterable + +import boto3 import simplejson as json import sqlalchemy -import boto3 -from .connector import RedshiftConnector -from typing import List, Any, Iterable, Dict, Optional from botocore.exceptions import ClientError - from singer_sdk.helpers._compat import ( date_fromisoformat, datetime_fromisoformat, @@ -23,9 +22,12 @@ get_datelike_property_type, handle_invalid_timestamp_in_record, ) +from singer_sdk.sinks import SQLSink from target_redshift.connector import RedshiftConnector -from redshift_connector import Cursor + +if TYPE_CHECKING: + from redshift_connector import Cursor class RedshiftSink(SQLSink): @@ -34,7 +36,7 @@ class RedshiftSink(SQLSink): connector_class = RedshiftConnector MAX_SIZE_DEFAULT = 50000 - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: # noqa: ANN002, ANN003 """Initialize SQL Sink. See super class for more details.""" super().__init__(*args, **kwargs) self.temp_table_name = self.generate_temp_table_name() @@ -53,7 +55,9 @@ def schema_name(self) -> str | None: The target schema name. 
""" # Look for a default_target_scheme in the configuraion fle - default_target_schema: str = self.config.get("default_target_schema", os.getenv("MELTANO_EXTRACT__LOAD_SCHEMA")) + default_target_schema: str = self.config.get( + "default_target_schema", os.getenv("MELTANO_EXTRACT__LOAD_SCHEMA") + ) parts = self.stream_name.split("-") # 1) When default_target_scheme is in the configuration use it @@ -74,7 +78,7 @@ def setup(self) -> None: self.append_only = True else: self.append_only = False - with self.connector._connect_cursor() as cursor: + with self.connector.connect_cursor() as cursor: if self.schema_name: self.connector.prepare_schema(self.schema_name, cursor=cursor) self.connector.prepare_table( @@ -85,9 +89,9 @@ def setup(self) -> None: as_temp_table=False, ) - def generate_temp_table_name(self): + def generate_temp_table_name(self) -> str: """Uuid temp table name.""" - # sqlalchemy.exc.IdentifierError: Identifier + # sqlalchemy.exc.IdentifierError: Identifier # noqa: ERA001 # 'temp_test_optional_attributes_388470e9_fbd0_47b7_a52f_d32a2ee3f5f6' # exceeds maximum length of 63 characters # Is hit if we have a long table name, there is no limit on Temporary tables @@ -105,9 +109,11 @@ def process_batch(self, context: dict) -> None: """ # If duplicates are merged, these can be tracked via # :meth:`~singer_sdk.Sink.tally_duplicate_merged()`. - with self.connector._connect_cursor() as cursor: + with self.connector.connect_cursor() as cursor: # Get target table - table: sqlalchemy.Table = self.connector.get_table(full_table_name=self.full_table_name) + table: sqlalchemy.Table = self.connector.get_table( + full_table_name=self.full_table_name + ) # Create a temp table (Creates from the table above) temp_table: sqlalchemy.Table = self.connector.copy_table_structure( full_table_name=self.temp_table_name, @@ -117,35 +123,31 @@ def process_batch(self, context: dict) -> None: ) # Insert into temp table self.file = f"{self.stream_name}-{self.temp_table_name}.csv" - self.path = os.path.join(self.config["temp_dir"], self.file) - self.object = os.path.join(self.config["s3_key_prefix"], self.file) - self.bulk_insert_records( + self.path = Path(self.config["temp_dir"]) / self.file + self.path.parent.mkdir(parents=True, exist_ok=True) + self.object = f'{self.config["s3_key_prefix"]} / {self.file}' + self._bulk_insert_records( table=temp_table, - schema=self.schema, - primary_keys=self.key_properties, records=context["records"], cursor=cursor, ) - self.logger.info(f'merging {len(context["records"])} records into {table}') + self.logger.info(f'merging {len(context["records"])} records into {table}') # noqa: G004 # Merge data from temp table to main table - self.upsert( + self._upsert( from_table=temp_table, to_table=table, - schema=self.schema, join_keys=self.key_properties, cursor=cursor, ) # clean_resources - self.clean_resources() + self._clean_resources() - def bulk_insert_records( # type: ignore[override] + def _bulk_insert_records( # type: ignore[override] self, table: sqlalchemy.Table, - schema: dict, - records: Iterable[Dict[str, Any]], - primary_keys: List[str], + records: Iterable[dict[str, Any]], cursor: Cursor, - ) -> Optional[int]: + ) -> int | None: """Bulk insert records to an existing destination table. The default implementation uses a generic SQLAlchemy bulk insert operation. @@ -153,28 +155,29 @@ def bulk_insert_records( # type: ignore[override] faster, native bulk uploads. Args: - full_table_name: the target table name. + table: the target table name. 
schema: the JSON schema for the new table, to be used when inferring column names. records: the input records. + cursor: the redshift connector cursor. Returns: True if table exists, False if not, None if unsure or undetectable. """ - self.write_csv(records) - self.logger.info(f'writing {len(records)} records to s3://{self.config["s3_bucket"]}/{self.object}') - self.copy_to_s3() - self.copy_to_redshift(table, cursor) + self._write_csv(records) + msg = f'writing {len(records)} records to s3://{self.config["s3_bucket"]}/{self.object}' + self.logger.info(msg) + self._copy_to_s3() + self._copy_to_redshift(table, cursor) return True - def upsert( + def _upsert( self, from_table: sqlalchemy.Table, to_table: sqlalchemy.Table, - schema: dict, - join_keys: List[str], + join_keys: list[str], cursor: Cursor, - ) -> Optional[int]: + ) -> int | None: """Merge upsert data from one table to another. Args: @@ -206,21 +209,44 @@ def upsert( cursor.execute(merge_sql) return None - def write_csv(self, records: List[dict]) -> int: - # """Write a CSV file.""" + def _write_csv(self, records: list[dict]) -> None: + """Write records to a local csv file. + + Parameters + ---------- + records : List[dict] + the input records. + + Returns: + ------- + None + + Raises: + ------ + ValueError + _description_ + """ if "properties" not in self.schema: - raise ValueError("Stream's schema has no properties defined.") - keys: List[str] = list(self.schema["properties"].keys()) - try: - os.mkdir(self.config["temp_dir"]) - except: - pass - object_keys = [key for key, value in self.schema["properties"].items() if "object" in value["type"] or "array" in value["type"]] + msg = "Stream's schema has no properties defined." + raise ValueError(msg) + keys: list[str] = list(self.schema["properties"].keys()) + object_keys = [ + key + for key, value in self.schema["properties"].items() + if "object" in value["type"] or "array" in value["type"] + ] records = [ - {key: (json.dumps(value).replace("None", "") if key in object_keys else value) for key, value in record.items()} + { + key: ( + json.dumps(value).replace("None", "") + if key in object_keys + else value + ) + for key, value in record.items() + } for record in records ] - with open(self.path, "w", encoding="utf-8", newline="") as fp: + with self.path.open("w", encoding="utf-8", newline="") as fp: writer = csv.DictWriter( fp, fieldnames=keys, @@ -229,13 +255,17 @@ def write_csv(self, records: List[dict]) -> int: ) writer.writerows(records) - def copy_to_s3(self): + def _copy_to_s3(self) -> None: + """Copy the csv file to s3.""" try: - _ = self.s3_client.upload_file(self.path, self.config["s3_bucket"], self.object) - except ClientError as e: - self.logger.error(e) + _ = self.s3_client.upload_file( + self.path, self.config["s3_bucket"], self.object + ) + except ClientError: + self.logger.exception() - def copy_to_redshift(self, table: sqlalchemy.Table, cursor: Cursor): + def _copy_to_redshift(self, table: sqlalchemy.Table, cursor: Cursor) -> None: + """Copy the s3 csv file to redshift.""" copy_credentials = f"IAM_ROLE '{self.config['aws_redshift_copy_role_arn']}'" # Step 3: Generate copy options - Override defaults from config.json if defined @@ -247,11 +277,11 @@ def copy_to_redshift(self, table: sqlalchemy.Table, cursor: Cursor): COMPUPDATE OFF STATUPDATE OFF """, ) - columns = ", ".join([f'"{column}"' for column in self.schema["properties"].keys()]) + columns = ", ".join([f'"{column}"' for column in self.schema["properties"]]) # Step 4: Load into the stage table copy_sql = f""" 
COPY {self.connector.quote(str(table))} ({columns}) - FROM 's3://{self.config["s3_bucket"]}/{self.object}' + FROM 's3://{self.config["s3_bucket"]}/{self.object}' {copy_credentials} {copy_options} CSV @@ -303,10 +333,13 @@ def _parse_timestamps_in_record( ) record[key] = date_val - def clean_resources(self): - os.remove(self.path) + def _clean_resources(self) -> None: + """Remove local and s3 resources.""" + Path.unlink(self.path) if self.config["remove_s3_files"]: try: - _ = self.s3_client.delete_object(Bucket=self.config["s3_bucket"], Key=self.object) - except ClientError as e: - self.logger.error(e) + _ = self.s3_client.delete_object( + Bucket=self.config["s3_bucket"], Key=self.object + ) + except ClientError: + self.logger.exception() diff --git a/target_redshift/target.py b/target_redshift/target.py index 815e928..934e3ae 100644 --- a/target_redshift/target.py +++ b/target_redshift/target.py @@ -2,13 +2,16 @@ from __future__ import annotations -from pathlib import PurePath +from typing import TYPE_CHECKING from singer_sdk import typing as th from singer_sdk.target_base import SQLTarget from target_redshift.sinks import RedshiftSink +if TYPE_CHECKING: + from pathlib import PurePath + class TargetRedshift(SQLTarget): """Target for Redshift.""" @@ -18,8 +21,8 @@ class TargetRedshift(SQLTarget): def __init__( self, config: dict | PurePath | str | list[PurePath | str] | None = None, - parse_env_config: bool = False, - validate_config: bool = True, + parse_env_config: bool = False, # noqa: FBT001, FBT002 + validate_config: bool = True, # noqa: FBT001, FBT002 ) -> None: """Initialize the target. @@ -38,7 +41,7 @@ def __init__( validate_config=validate_config, ) - assert self.config.get("add_record_metadata") or not self.config.get( + assert self.config.get("add_record_metadata") or not self.config.get( # noqa: S101 "activate_version" ), ( "Activate version messages can't be processed unless add_record_metadata " @@ -46,7 +49,7 @@ def __init__( "`activate_version` configuration to False." ) - assert self.config.get("s3_bucket") is not None + assert self.config.get("s3_bucket") is not None # noqa: S101 name = "target-redshift" @@ -55,7 +58,7 @@ def __init__( "host", th.StringType, description=( - "Hostname for redshift instance. Note if sqlalchemy_url is set this will be ignored." + "Hostname for redshift instance." ), ), th.Property( @@ -63,36 +66,39 @@ def __init__( th.StringType, default="5432", description=( - "The port on which redshift is awaiting connection. " - + "Note if sqlalchemy_url is set this will be ignored." + "The port on which redshift is awaiting connection." ), ), th.Property( "enable_iam_authentication", th.BooleanType, description=( - "If true, use temporary credentials (https://docs.aws.amazon.com/redshift/latest/mgmt/generating-iam-credentials-cli-api.html). Note if sqlalchemy_url is set this will be ignored." + "If true, use temporary credentials " + "(https://docs.aws.amazon.com/redshift/latest/mgmt/generating-iam-credentials-cli-api.html)." ), ), th.Property( "cluster_identifier", th.StringType, description=( - "Redshift cluster identifier. Note if sqlalchemy_url is set or enable_iam_authentication is false this will be ignored." + "Redshift cluster identifier. Note if sqlalchemy_url is set or " + "enable_iam_authentication is false this will be ignored." ), ), th.Property( "user", th.StringType, description=( - "User name used to authenticate. Note if sqlalchemy_url is set this will be ignored." + "User name used to authenticate. 
Note if sqlalchemy_url is set this " + "will be ignored." ), ), th.Property( "password", th.StringType, description=( - "Password used to authenticate. Note if sqlalchemy_url is set this will be ignored." + "Password used to authenticate. Note if sqlalchemy_url is set this " + "will be ignored." ), ), th.Property( @@ -119,8 +125,9 @@ def __init__( "s3_region", th.StringType, description=( - "AWS region for S3 bucket. If not specified, region will be detected by boto config resolution." - + "See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html" + "AWS region for S3 bucket. If not specified, region will be " + "detected by boto config resolution. " + "See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html" ), ), th.Property( @@ -152,7 +159,7 @@ def __init__( default=False, description=( "If set to false, the tap will ignore activate version messages. If " - + "set to true, add_record_metadata must be set to true as well." + "set to true, add_record_metadata must be set to true as well." ), ), th.Property( @@ -161,9 +168,9 @@ def __init__( default=False, description=( "When activate version is sent from a tap this specefies " - + "if we should delete the records that don't match, or mark " - + "them with a date in the `_sdc_deleted_at` column. This config " - + "option is ignored if `activate_version` is set to false." + "if we should delete the records that don't match, or mark " + "them with a date in the `_sdc_deleted_at` column. This config " + "option is ignored if `activate_version` is set to false." ), ), th.Property( @@ -172,9 +179,9 @@ def __init__( default=False, description=( "Note that this must be enabled for activate_version to work!" - + "This adds _sdc_extracted_at, _sdc_batched_at, and more to every " - + "table. See https://sdk.meltano.com/en/latest/implementation/record_metadata.html " # noqa: E501 - + "for more information." + "This adds _sdc_extracted_at, _sdc_batched_at, and more to every " + "table. See https://sdk.meltano.com/en/latest/implementation/record_metadata.html " # noqa: E501 + "for more information." ), ), th.Property( @@ -183,9 +190,9 @@ def __init__( default=False, description=( "Whether or not to use ssl to verify the server's identity. Use" - + " ssl_certificate_authority and ssl_mode for further customization." - + " To use a client certificate to authenticate yourself to the server," - + " use ssl_client_certificate_enable instead." + " ssl_certificate_authority and ssl_mode for further customization." + " To use a client certificate to authenticate yourself to the server," + " use ssl_client_certificate_enable instead." ), ), th.Property( @@ -194,8 +201,8 @@ def __init__( default="verify-full", description=( "SSL Protection method, see [redshift documentation](https://docs.aws.amazon.com/redshift/latest/mgmt/connecting-ssl-support.html" - + " for more information. Must be one of disable, allow, prefer," - + " require, verify-ca, or verify-full." + " for more information. Must be one of disable, allow, prefer," + " require, verify-ca, or verify-full." 
), ), ).to_dict() diff --git a/tests/test_core.py b/tests/test_core.py index 206a33e..a265979 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -9,7 +9,6 @@ from target_redshift.target import TargetRedshift -# TODO: Initialize minimal target config SAMPLE_CONFIG: dict[str, t.Any] = {} @@ -34,6 +33,3 @@ def resource(self): # noqa: ANN201 https://github.com/meltano/sdk/tree/main/tests/samples """ return "resource" - - -# TODO: Create additional tests as appropriate for your target.