chore: fix lint and remove incorrect integration mark from unit tests (#4621)

* chore: fix lint and remove incorrect integration mark from unit tests

* add to test requirements

* revert athena source tests
Aseem Bansal 2022-04-08 20:48:48 +05:30 committed by GitHub
parent cd43a4a543
commit 61a95f41ae
9 changed files with 13 additions and 39 deletions
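
For context: unit suites are typically invoked with a marker filter (something like pytest -m "not integration"), so a unit test that carries the integration mark is silently excluded from unit CI. A minimal sketch of the failure mode this commit removes (the exact CI invocation is an assumption, not taken from this diff):

    import pytest

    def test_parses_config():
        # unmarked unit test: collected and run by the plain unit suite
        assert 1 + 1 == 2

    @pytest.mark.integration
    def test_needs_real_server():
        # excluded by -m "not integration"; putting this mark on a pure
        # unit test therefore drops it from unit CI without any failure
        assert True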


@@ -291,7 +291,9 @@ base_dev_requirements = {
             "bigquery-usage",
             "clickhouse",
             "clickhouse-usage",
+            "druid",
             "elasticsearch",
+            "ldap",
             "looker",
             "glue",
             "mariadb",
@@ -310,7 +312,7 @@ base_dev_requirements = {
             "trino",
             "hive",
             "starburst-trino-usage",
-            "powerbi"
+            "powerbi",
             # airflow is added below
         ]
         for dependency in plugins[plugin]
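
With druid and ldap folded into the dev requirements (the "add to test requirements" bullet above), the now top-level imports in their unit tests resolve during pytest collection. A minimal sketch of the extras pattern this setup.py section uses; the package names and version pins here are hypothetical placeholders, not the repo's actual values:

    from setuptools import setup

    plugins = {
        "druid": {"pydruid>=0.6.2"},    # hypothetical pin
        "ldap": {"python-ldap>=3.4"},   # hypothetical pin
    }

    setup(
        name="example",
        extras_require={
            # one installable extra per plugin...
            **{name: sorted(deps) for name, deps in plugins.items()},
            # ...plus a dev extra that unions every plugin named in the
            # list above, which is what the diff extends with druid/ldap
            "dev": sorted({d for deps in plugins.values() for d in deps}),
        },
    )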


@@ -109,7 +109,7 @@ class TableauSource(Source):
     config: TableauConfig
     report: SourceReport
     platform = "tableau"
-    server: Server
+    server: Optional[Server]
     upstream_tables: Dict[str, Tuple[Any, str]] = {}

     def __hash__(self):
@@ -136,7 +136,7 @@ class TableauSource(Source):
     def _authenticate(self):
         # https://tableau.github.io/server-client-python/docs/api-ref#authentication
-        authentication = None
+        authentication: Optional[Union[TableauAuth, PersonalAccessTokenAuth]] = None
         if self.config.username and self.config.password:
             authentication = TableauAuth(
                 username=self.config.username,
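
Both tableau annotations are lint fixes for the type checker. With a bare `authentication = None`, mypy resolves the variable's partial None type from the first assignment it sees, so a second branch assigning a different auth class fails with an incompatible-assignment error; the explicit Union widens the type up front. A minimal sketch with stand-in classes (assuming the lint failure came from mypy, which the diff does not state):

    from typing import Optional, Union

    class TableauAuth: ...
    class PersonalAccessTokenAuth: ...

    def pick_auth(use_password: bool) -> Optional[Union[TableauAuth, PersonalAccessTokenAuth]]:
        # without this annotation, mypy infers Optional[TableauAuth] from
        # the first branch and rejects the PersonalAccessTokenAuth branch
        authentication: Optional[Union[TableauAuth, PersonalAccessTokenAuth]] = None
        if use_password:
            authentication = TableauAuth()
        else:
            authentication = PersonalAccessTokenAuth()
        return authentication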


@@ -5,6 +5,7 @@ from unittest import mock
 import pytest
 from freezegun import freeze_time

+from datahub.ingestion.api.common import PipelineContext
 from src.datahub.ingestion.source.aws.s3_util import make_s3_urn

 FROZEN_TIME = "2020-04-14 07:00:00"
@@ -34,7 +35,6 @@ def test_athena_uri():
 def test_athena_get_table_properties():
     from pyathena.model import AthenaTableMetadata
-    from datahub.ingestion.api.common import PipelineContext
     from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource

     config = AthenaConfig.parse_obj(


@@ -1,13 +1,11 @@
 import json
 import os

-import pytest
-
-
-@pytest.mark.integration
-def test_bigquery_uri_with_credential():
-    from datahub.ingestion.source.usage.bigquery_usage import BigQueryUsageConfig
+from datahub.ingestion.source.usage.bigquery_usage import BigQueryUsageConfig
+
+
+def test_bigquery_uri_with_credential():
     expected_credential_json = {
         "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
         "auth_uri": "https://accounts.google.com/o/oauth2/auth",


@@ -1,10 +1,8 @@
-import pytest
-
-
-@pytest.mark.integration
-def test_clickhouse_uri_https():
-    from datahub.ingestion.source.sql.clickhouse import ClickHouseConfig
+from datahub.ingestion.source.sql.clickhouse import ClickHouseConfig
+
+
+def test_clickhouse_uri_https():
     config = ClickHouseConfig.parse_obj(
         {
             "username": "user",
@@ -20,9 +18,7 @@ def test_clickhouse_uri_https():
     )


-@pytest.mark.integration
 def test_clickhouse_uri_native():
-    from datahub.ingestion.source.sql.clickhouse import ClickHouseConfig
-
     config = ClickHouseConfig.parse_obj(
         {
@@ -35,9 +31,7 @@ def test_clickhouse_uri_native():
     assert config.get_sql_alchemy_url() == "clickhouse+native://user:password@host:1111"


-@pytest.mark.integration
 def test_clickhouse_uri_native_secure():
-    from datahub.ingestion.source.sql.clickhouse import ClickHouseConfig
-
     config = ClickHouseConfig.parse_obj(
         {


@@ -1,10 +1,8 @@
-import pytest
-
-
-@pytest.mark.integration
-def test_druid_uri():
-    from datahub.ingestion.source.sql.druid import DruidConfig
+from datahub.ingestion.source.sql.druid import DruidConfig
+
+
+def test_druid_uri():
     config = DruidConfig.parse_obj({"host_port": "localhost:8082"})

     assert config.get_sql_alchemy_url() == "druid://localhost:8082/druid/v2/sql/"
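
These config tests all get the same two-part treatment: drop the stray mark, and hoist the function-local import to module top, which is safe now that the plugin dependencies ship with the dev extra. Where a dependency may genuinely be absent, pytest has a first-class alternative to hiding imports inside test bodies; a sketch of that option (not what this repo does):

    import pytest

    # skip the whole module at collection time when the dependency is
    # missing, instead of erroring inside each test that imports it
    pydruid = pytest.importorskip("pydruid")

    def test_druid_uri():
        from datahub.ingestion.source.sql.druid import DruidConfig

        config = DruidConfig.parse_obj({"host_port": "localhost:8082"})
        assert config.get_sql_alchemy_url() == "druid://localhost:8082/druid/v2/sql/"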


@@ -1,13 +1,11 @@
 import deepdiff
-import pytest

+from datahub.ingestion.api.common import PipelineContext
+from datahub.ingestion.source.sql.hive import HiveConfig, HiveSource
 from datahub.utilities.hive_schema_to_avro import get_avro_schema_for_hive_column


-@pytest.mark.integration
 def test_hive_configuration_get_identifier_with_database():
-    from datahub.ingestion.api.common import PipelineContext
-    from datahub.ingestion.source.sql.hive import HiveConfig, HiveSource

     test_db_name = "test_database"
     # test_table_name = "test_table"
@@ -26,7 +24,6 @@ def test_hive_configuration_get_identifier_with_database():
     assert output == [expected_output]


-@pytest.mark.integration
 def test_hive_configuration_get_avro_schema_from_native_data_type():
     # Test 3 - struct of struct
     datatype_string = "struct<type:string,provider:array<int>,abc:struct<t1:string>>"


@@ -1,9 +1,9 @@
 import pytest

+from datahub.ingestion.source.ldap import parse_from_attrs, strip_ldap_info

-@pytest.mark.integration
 def test_strip_ldap_info():
-    from datahub.ingestion.source.ldap import strip_ldap_info
-
     assert (
         strip_ldap_info(b"uid=firstname.surname,ou=People,dc=internal,dc=machines")
@@ -11,7 +11,6 @@ def test_strip_ldap_info():
     )


-@pytest.mark.integration
 @pytest.mark.parametrize(
     "input, expected",
     [
@@ -35,7 +34,6 @@ def test_strip_ldap_info():
     ],
 )
 def test_parse_from_attrs(input, expected):
-    from datahub.ingestion.source.ldap import parse_from_attrs

     assert (
         parse_from_attrs(


@@ -40,7 +40,6 @@ def test_delayed_iter():
     ]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -52,7 +51,6 @@ def test_metadatasql_sql_parser_get_tables_from_simple_query():
     assert tables_list == ["bar", "foo"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -64,7 +62,6 @@ def test_sqllineage_sql_parser_get_tables_from_simple_query():
     assert tables_list == ["bar", "foo"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -121,7 +118,6 @@ date :: date) <= 7
     assert tables_list == ["schema1.foo", "schema2.bar"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -141,7 +137,6 @@ def test_sqllineage_sql_parser_get_columns_from_simple_query():
     assert columns_list == ["a", "b"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -153,7 +148,6 @@ def test_metadatasql_sql_parser_get_columns_with_alias_and_count_star():
     assert columns_list == ["a", "b", "count", "test"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -181,7 +175,6 @@ WHERE
     assert columns_list == ["bs", "pi", "pt", "pu", "v"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -238,7 +231,6 @@ date :: date) <= 7
     assert columns_list == ["c", "date", "e", "u", "x"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -257,7 +249,6 @@ def test_metadatasql_sql_parser_get_tables_from_templated_query():
     assert tables_list == ["my_view.SQL_TABLE_NAME"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -276,7 +267,6 @@ def test_sqllineage_sql_parser_get_tables_from_templated_query():
     assert tables_list == ["my_view.SQL_TABLE_NAME"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -295,7 +285,6 @@ def test_metadatasql_sql_parser_get_columns_from_templated_query():
     assert columns_list == ["city", "country", "measurement", "timestamp"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -314,7 +303,6 @@ def test_sqllineage_sql_parser_get_columns_from_templated_query():
     assert columns_list == ["city", "country", "measurement", "timestamp"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
@@ -329,7 +317,6 @@ def test_sqllineage_sql_parser_with_weird_lookml_query():
     assert columns_list == ["aliased_platform", "country", "date"]


-@pytest.mark.integration
 @pytest.mark.skipif(
     sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
 )
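
The skipif guards left in place are a different mechanism from the removed marks: skipif is evaluated per run against the current interpreter, so it cannot silently exclude a test from every environment the way a stray mark combined with an -m filter can. A sketch of the retained pattern (the test body here is hypothetical):

    import sys

    import pytest

    @pytest.mark.skipif(
        sys.version_info < (3, 7), reason="The LookML source requires Python 3.7+"
    )
    def test_parser_smoke():
        # runs everywhere except Python < 3.7, where it is reported as
        # skipped rather than vanishing from the suite entirely
        assert sorted(["foo", "bar"]) == ["bar", "foo"]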