MINOR - Clean up configs & add auto classification docs (#18907)

* MINOR - Clean up configs & add auto classification docs
* deprecation notice

This commit is contained in:
parent 75d12720fa
commit 613fd331e0
@@ -1745,7 +1745,7 @@ WHERE JSON_EXTRACT(json, '$.pipelineType') = 'metadata';

-- classification and sampling configs from the profiler pipelines
UPDATE ingestion_pipeline_entity
SET json = JSON_REMOVE(json, '$.sourceConfig.config.processPiiSensitive', '$.sourceConfig.config.confidence', '$.sourceConfig.config.generateSampleData')
SET json = JSON_REMOVE(json, '$.sourceConfig.config.processPiiSensitive', '$.sourceConfig.config.confidence', '$.sourceConfig.config.generateSampleData', '$.sourceConfig.config.sampleDataCount')
WHERE JSON_EXTRACT(json, '$.pipelineType') = 'profiler';

-- Rename 'jobId' to 'jobIds', set 'jobId' as type array in 'jobIds', add 'projectIds' for dbt cloud
@@ -1732,7 +1732,7 @@ WHERE json #>> '{pipelineType}' = 'metadata';

-- classification and sampling configs from the profiler pipelines
UPDATE ingestion_pipeline_entity
SET json = json::jsonb #- '{sourceConfig,config,processPiiSensitive}' #- '{sourceConfig,config,confidence}' #- '{sourceConfig,config,generateSampleData}'
SET json = json::jsonb #- '{sourceConfig,config,processPiiSensitive}' #- '{sourceConfig,config,confidence}' #- '{sourceConfig,config,generateSampleData}' #- '{sourceConfig,config,sampleDataCount}'
WHERE json #>> '{pipelineType}' = 'profiler';

-- set value of 'jobId' as an array into 'jobIds' for dbt cloud
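If you want to confirm the clean-up took effect after upgrading, a small ad-hoc check can count profiler pipelines that still carry one of the removed keys. This is only a sketch against the MySQL variant shown above; the connection URL is a placeholder and the helper is not part of the migration scripts:

```python
from sqlalchemy import create_engine, text

# Placeholder URL: point it at your own OpenMetadata database
engine = create_engine("mysql+pymysql://user:password@localhost:3306/openmetadata_db")

# Mirrors the WHERE clause of the migration above, but only counts leftovers
CHECK = text(
    """
    SELECT COUNT(*) AS leftover
    FROM ingestion_pipeline_entity
    WHERE JSON_EXTRACT(json, '$.pipelineType') = 'profiler'
      AND JSON_EXTRACT(json, '$.sourceConfig.config.sampleDataCount') IS NOT NULL
    """
)

with engine.connect() as conn:
    leftover = conn.execute(CHECK).scalar_one()
    print(f"profiler pipelines still holding sampleDataCount: {leftover}")
```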
@@ -144,7 +144,7 @@ class AWSClient:

    def get_client(self, service_name: str) -> Any:
        # initialize the client depending on the AWSCredentials passed
        if self.config is not None:
            logger.info(f"Getting AWS client for service [{service_name}]")
            logger.debug(f"Getting AWS client for service [{service_name}]")
            session = self.create_session()
            if self.config.endPointURL is not None:
                return session.client(

@@ -152,7 +152,7 @@ class AWSClient:

                )
            return session.client(service_name=service_name)

        logger.info(f"Getting AWS default client for service [{service_name}]")
        logger.debug(f"Getting AWS default client for service [{service_name}]")
        # initialized with the credentials loaded from running machine
        return boto3.client(service_name=service_name)
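The only functional change in these two hunks is the log level. For reference, the overall pattern is standard boto3 usage; a minimal, self-contained sketch (hypothetical helper name and parameters, not the actual `AWSClient` code) looks like this:

```python
import logging
from typing import Any, Optional

import boto3

logger = logging.getLogger(__name__)


def get_aws_client(
    service_name: str,
    region_name: Optional[str] = None,
    endpoint_url: Optional[str] = None,
    profile_name: Optional[str] = None,
) -> Any:
    """Build a boto3 client, preferring explicit settings when they are given."""
    if profile_name or region_name or endpoint_url:
        logger.debug(f"Getting AWS client for service [{service_name}]")
        session = boto3.Session(profile_name=profile_name, region_name=region_name)
        if endpoint_url is not None:
            # e.g. a LocalStack or MinIO endpoint instead of the AWS default
            return session.client(service_name=service_name, endpoint_url=endpoint_url)
        return session.client(service_name=service_name)

    # Otherwise fall back to the credentials resolved from the running machine
    logger.debug(f"Getting AWS default client for service [{service_name}]")
    return boto3.client(service_name=service_name)
```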
@@ -154,7 +154,6 @@ class ProfilerSource(ProfilerSourceInterface):

                profile_sample_type=self.source_config.profileSampleType,
                sampling_method_type=self.source_config.samplingMethodType,
            ),
            default_sample_data_count=self.source_config.sampleDataCount,
        )

        profiler_interface: ProfilerInterface = profiler_class.create(
@@ -88,11 +88,7 @@ class SamplerProcessor(Processor):

            schema_entity=schema_entity,
            database_entity=database_entity,
            table_config=get_config_for_table(entity, self.profiler_config),
            default_sample_config=SampleConfig(
                profile_sample=self.source_config.profileSample,
                profile_sample_type=self.source_config.profileSampleType,
                sampling_method_type=self.source_config.samplingMethodType,
            ),
            default_sample_config=SampleConfig(),
            default_sample_data_count=self.source_config.sampleDataCount,
        )
        sample_data = SampleData(
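The change above drops the per-field forwarding and passes a bare `SampleConfig()` instead. As a rough illustration of why that works (field names follow the diff; the real class lives in the OpenMetadata ingestion package and its types and defaults may differ), the defaults can simply live on the config object:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class SampleConfig:
    # Hypothetical defaults for illustration only
    profile_sample: Optional[float] = None
    profile_sample_type: Optional[str] = None
    sampling_method_type: Optional[str] = None


# Callers no longer need to forward every source-config field explicitly:
default_sample_config = SampleConfig()
print(default_sample_config)
```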
@@ -0,0 +1,154 @@

## Auto Classification

The Auto Classification workflow uses the `orm-profiler` processor.

After running a Metadata Ingestion workflow, we can run the Auto Classification workflow.
The `serviceName` must be the same as the one used in the Metadata Ingestion workflow, so that the ingestion bot can get the `serviceConnection` details from the server.

### 1. Define the YAML Config

This is a sample config for the Auto Classification workflow:
{% codePreview %}

{% codeInfoContainer %}

#### Source Configuration - Source Config

You can find all the definitions and types for the `sourceConfig` [here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/metadataIngestion/databaseServiceAutoClassificationPipeline.json).

{% codeInfo srNumber=14 %}

**storeSampleData**: Option to turn on/off storing sample data. If enabled, we will ingest sample data for each table.

{% /codeInfo %}

{% codeInfo srNumber=15 %}

**enableAutoClassification**: Optional configuration to automatically tag columns that might contain sensitive information.

{% /codeInfo %}

{% codeInfo srNumber=18 %}

**confidence**: Set the confidence value required for a column to be tagged as PII. The confidence value ranges from 0 to 100. A higher number will yield fewer false positives but more false negatives; a lower number will yield more false positives but fewer false negatives.

{% /codeInfo %}

{% codeInfo srNumber=19 %}

**databaseFilterPattern**: Regex to fetch only the databases that match the pattern.

{% /codeInfo %}

{% codeInfo srNumber=20 %}

**schemaFilterPattern**: Regex to fetch only the schemas that match the pattern.

{% /codeInfo %}

{% codeInfo srNumber=21 %}

**tableFilterPattern**: Regex to fetch only the tables that match the pattern.

{% /codeInfo %}
{% codeInfo srNumber=22 %}

#### Processor Configuration

Choose the `orm-profiler`. Its config can also be updated to define tests from the YAML itself instead of the UI:

**tableConfig**: `tableConfig` allows you to set up some configuration at the table level.
{% /codeInfo %}

{% codeInfo srNumber=23 %}

#### Sink Configuration

To send the metadata to OpenMetadata, it needs to be specified as `type: metadata-rest`.
{% /codeInfo %}

{% partial file="/v1.5/connectors/yaml/workflow-config-def.md" /%}

{% /codeInfoContainer %}

{% codeBlock fileName="filename.yaml" %}
```yaml {% isCodeBlock=true %}
source:
  type: {% $connector %}
  serviceName: {% $connector %}
  sourceConfig:
    config:
      type: AutoClassification
```
```yaml {% srNumber=14 %}
      # storeSampleData: true
```
```yaml {% srNumber=15 %}
      # enableAutoClassification: true
```
```yaml {% srNumber=18 %}
      # confidence: 80
```
```yaml {% srNumber=19 %}
      # databaseFilterPattern:
      #   includes:
      #     - database1
      #     - database2
      #   excludes:
      #     - database3
      #     - database4
```
```yaml {% srNumber=20 %}
      # schemaFilterPattern:
      #   includes:
      #     - schema1
      #     - schema2
      #   excludes:
      #     - schema3
      #     - schema4
```
```yaml {% srNumber=21 %}
      # tableFilterPattern:
      #   includes:
      #     - table1
      #     - table2
      #   excludes:
      #     - table3
      #     - table4
```

```yaml {% srNumber=22 %}
processor:
  type: orm-profiler
  config: {}
```

```yaml {% srNumber=23 %}
sink:
  type: metadata-rest
  config: {}
```

{% partial file="/v1.5/connectors/yaml/workflow-config.md" /%}

{% /codeBlock %}

{% /codePreview %}
### 2. Run with the CLI

After saving the YAML config, we will run the command the same way we did for the metadata ingestion:

```bash
metadata classify -c <path-to-yaml>
```

Note that instead of running `ingest`, we are now using the `classify` command to select the Auto Classification workflow.
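If you script these runs, a small wrapper can sanity-check the YAML before calling the CLI shown above. This is just a sketch (the file name is hypothetical and the check itself is optional); the `metadata classify -c <path>` call is the part that matters:

```python
import subprocess
import sys

import yaml

CONFIG_PATH = "auto_classification.yaml"  # hypothetical file name

with open(CONFIG_PATH) as fp:
    config = yaml.safe_load(fp)

# The Auto Classification workflow expects this pipeline type in sourceConfig
pipeline_type = config["source"]["sourceConfig"]["config"]["type"]
if pipeline_type != "AutoClassification":
    sys.exit(f"Expected an AutoClassification config, got: {pipeline_type}")

# Equivalent to running `metadata classify -c auto_classification.yaml` by hand
subprocess.run(["metadata", "classify", "-c", CONFIG_PATH], check=True)
```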
@@ -2,7 +2,7 @@

The Data Profiler workflow will be using the `orm-profiler` processor.

After running a Metadata Ingestion workflow, we can run Data Profiler workflow.
After running a Metadata Ingestion workflow, we can run the Data Profiler workflow.
The `serviceName` must be the same as the one used in the Metadata Ingestion workflow, so that the ingestion bot can get the `serviceConnection` details from the server.
@@ -14,15 +14,10 @@ This is a sample config for the profiler:

{% codeInfoContainer %}

{% codeInfo srNumber=13 %}
#### Source Configuration - Source Config

You can find all the definitions and types for the `sourceConfig` [here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/metadataIngestion/databaseServiceProfilerPipeline.json).

**generateSampleData**: Option to turn on/off generating sample data.

{% /codeInfo %}

{% codeInfo srNumber=14 %}

**profileSample**: Percentage of data or no. of rows we want to execute the profiler and tests on.
@@ -35,19 +30,6 @@ You can find all the definitions and types for the `sourceConfig` [here](https:

{% /codeInfo %}

{% codeInfo srNumber=16 %}

**processPiiSensitive**: Optional configuration to automatically tag columns that might contain sensitive information.

{% /codeInfo %}

{% codeInfo srNumber=17 %}

**confidence**: Set the confidence value for which you want the column to be marked as PII.

{% /codeInfo %}

{% codeInfo srNumber=18 %}

**timeoutSeconds**: Profiler Timeout in Seconds
@@ -100,27 +82,17 @@ To send the metadata to OpenMetadata, it needs to be specified as `type: metadat

```yaml {% isCodeBlock=true %}
source:
  type: {% $connector %}
  serviceName: local_athena
  serviceName: {% $connector %}
  sourceConfig:
    config:
      type: Profiler
```

```yaml {% srNumber=13 %}
      generateSampleData: true
```
```yaml {% srNumber=14 %}
      # profileSample: 85
```
```yaml {% srNumber=15 %}
      # threadCount: 5
```
```yaml {% srNumber=16 %}
      processPiiSensitive: false
```
```yaml {% srNumber=17 %}
      # confidence: 80
```
```yaml {% srNumber=18 %}
      # timeoutSeconds: 43200
```
@@ -158,8 +130,6 @@ processor:

  config: {}  # Remove braces if adding properties
    # tableConfig:
    #   - fullyQualifiedName: <table fqn>
    #     profileSample: <number between 0 and 99> # default

    #     profileSample: <number between 0 and 99> # default will be 100 if omitted
    #     profileQuery: <query to use for sampling data for the profiler>
    #     columnConfig:
@@ -93,7 +93,7 @@ For a simple, local installation using our docker containers, this looks like:

```yaml {% srNumber=40 %}
source:
  type: {% $connector %}-lineage
  serviceName: <serviceName (same as metadata ingestion service name)>
  serviceName: {% $connector %}
  sourceConfig:
    config:
      type: DatabaseLineage
@@ -62,7 +62,7 @@ Note that the location is a directory that will be cleaned at the end of the ing

```yaml {% isCodeBlock=true %}
source:
  type: {% $connector %}-usage
  serviceName: <service name>
  serviceName: {% $connector %}
  sourceConfig:
    config:
      type: DatabaseUsage
@@ -84,33 +84,6 @@ during the migration after bumping this value, you can increase them further.

After the migration is finished, you can revert these changes.

# New Versioning System for Ingestion Docker Image

We are excited to announce a recent change in our version tagging system for our Ingestion Docker images. This update aims to improve consistency and clarity in our versioning, aligning our Docker image tags with our Python PyPI package versions.

### Ingestion Docker Image Tags

To maintain consistency, our Docker images will now follow the same 4-digit versioning system as our Python package versions. For example, a Docker image version might look like `1.0.0.0`.

Additionally, we will continue to provide a 3-digit version tag (e.g., `1.0.0`) that will always point to the latest corresponding 4-digit image tag. This ensures ease of use for those who prefer a simpler version tag while still having access to the most recent updates.

### Benefits

- **Consistency**: Both Python applications and Docker images will have the same versioning format, making it easier to track and manage versions.
- **Clarity**: The 4-digit system provides a clear and detailed versioning structure, helping users understand the nature and scope of changes.
- **Non-Breaking Change**: This update is designed to be non-disruptive. Existing ingestions and dependencies will remain unaffected.

#### Example

Here's an example of how the new versioning works:

**Python Application Version**: `1.5.0.0`
**Docker Image Tags**:
- `1.5.0.0` (specific version)
- `1.5.0` (latest version in the 1.5.0.x series)

We believe this update will bring greater consistency and clarity to our versioning system. As always, we value your feedback and welcome any questions or comments you may have.

# Backward Incompatible Changes

## 1.6.0
@@ -145,6 +118,13 @@ removing these properties as well.

- If you still want to use the Auto PII Classification and sampling features, you can create the new workflow
  from the UI.

### Collate - Metadata Actions for ML Tagging - Deprecation Notice

Since we are introducing the `Auto Classification` workflow, **we are going to remove the `ML Tagging` action in 1.7**
from the Metadata Actions. That feature will already be covered by the `Auto Classification` workflow, which brings even
more flexibility, allowing on-the-fly usage of the sample data for classification purposes without having to store
it in the database.

### Service Spec for the Ingestion Framework

This impacts users who maintain their own connectors for the ingestion framework that are **NOT** part of the
@@ -363,6 +363,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "athena"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "athena"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -204,6 +204,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "azuresql"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "azuresql"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -235,6 +235,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "bigquery"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "bigquery"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -240,6 +240,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "clickhouse"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "clickhouse"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -181,6 +181,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "databricks"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "databricks"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -178,6 +178,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "db2"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "db2"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -157,6 +157,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "doris"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "doris"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing Doris Connection with SSL in OpenMetadata

@@ -153,6 +153,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "druid"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "druid"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -265,6 +265,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "greenplum"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "greenplum"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing Greenplum Connection with SSL in OpenMetadata

@@ -200,6 +200,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "hive"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "hive"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing Hive Connection with SSL in OpenMetadata

@@ -162,6 +162,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "impala"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "impala"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing Impala Connection with SSL in OpenMetadata

@@ -157,6 +157,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "mariadb"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "mariadb"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -187,6 +187,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "mssql"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "mssql"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -269,6 +269,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "mysql"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "mysql"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing MySQL Connection with SSL in OpenMetadata

@@ -233,6 +233,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "oracle"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "oracle"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Lineage

@@ -151,6 +151,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "pinotdb"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "pinotdb"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -292,6 +292,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "postgres"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "postgres"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing Postgres Connection with SSL in OpenMetadata

@@ -160,6 +160,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "presto"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "presto"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -210,6 +210,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "redshift"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "redshift"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Securing Redshift Connection with SSL in OpenMetadata

@@ -223,6 +223,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "sapHana"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "sapHana"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -153,6 +153,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "singlestore"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "singlestore"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -275,6 +275,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "snowflake"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "snowflake"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -160,6 +160,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "sqlite"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "sqlite"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## Lineage

@@ -205,6 +205,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "synapse"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "synapse"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration

@@ -114,4 +114,6 @@ source:

{% partial file="/v1.6/connectors/yaml/data-profiler.md" variables={connector: "teradata"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "teradata"} /%}

{% partial file="/v1.6/connectors/yaml/data-quality.md" /%}

@@ -224,6 +224,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "trino"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "trino"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## SSL Configuration

@@ -197,6 +197,8 @@ source:

{% partial file="/v1.5/connectors/yaml/data-profiler.md" variables={connector: "vertica"} /%}

{% partial file="/v1.5/connectors/yaml/auto-classification.md" variables={connector: "vertica"} /%}

{% partial file="/v1.5/connectors/yaml/data-quality.md" /%}

## dbt Integration
@@ -68,20 +68,6 @@

      "default": 80,
      "title": "Auto Classification Inference Confidence Level"
    },
    "profileSampleType": {
      "$ref": "../entity/data/table.json#/definitions/profileSampleType",
      "title": "Profile Sample Type"
    },
    "profileSample": {
      "description": "Percentage of data or no. of rows used to compute the profiler metrics and run data quality tests",
      "type": "number",
      "default": null,
      "title": "Profile Sample"
    },
    "samplingMethodType": {
      "$ref": "../entity/data/table.json#/definitions/samplingMethodType",
      "title": "Sampling Method Type"
    },
    "sampleDataCount": {
      "description": "Number of sample rows to ingest when 'Generate Sample Data' is enabled",
      "type": "integer",
@@ -88,12 +88,6 @@

      "$ref": "../entity/data/table.json#/definitions/samplingMethodType",
      "title": "Sampling Method Type"
    },
    "sampleDataCount": {
      "description": "Number of sample rows to ingest when 'Generate Sample Data' is enabled",
      "type": "integer",
      "default": 50,
      "title": "Sample Data Rows Count"
    },
    "threadCount": {
      "description": "Number of threads to use during metric computations",
      "type": "number",
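To see how these schema fields constrain a config in practice, here is a hand-written, simplified subset of the properties above (not the actual pipeline schema files), validated with the `jsonschema` package; only the fields and defaults shown in the diff (confidence 80, sampleDataCount 50, threadCount as a number) are taken from the schema:

```python
import jsonschema

# Simplified subset for illustration only
SOURCE_CONFIG_SUBSET = {
    "type": "object",
    "properties": {
        "confidence": {"type": "number", "minimum": 0, "maximum": 100, "default": 80},
        "sampleDataCount": {"type": "integer", "default": 50},
        "threadCount": {"type": "number"},
    },
}

source_config = {"confidence": 80, "sampleDataCount": 50, "threadCount": 5}

# Raises jsonschema.ValidationError if a field has the wrong type or range
jsonschema.validate(instance=source_config, schema=SOURCE_CONFIG_SUBSET)
print("source config matches the simplified schema")
```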
@@ -79,7 +79,7 @@ $$

$$section
### Store Sample Data $(id="storeSampleData")

Set the Store Sample Data toggle to control whether to store sample data as part of the Auto Classification workflow. If this is enabled, 100 rows will be ingested by default. You can update the number of rows in the "DatabaseServiceProfilerPipeline Advanced Config" section (i.e. the `Sample Data Rows Count` setting).
Set the Store Sample Data toggle to control whether to store sample data as part of the Auto Classification workflow. If this is enabled, 50 rows will be ingested by default. You can update the number of rows in the "DatabaseServiceProfilerPipeline Advanced Config" section (i.e. the `Sample Data Rows Count` setting).

If disabled, OpenMetadata will not store any sample data, but will still use it on-the-fly to compute the Auto Classification.
$$
@@ -97,29 +97,6 @@ $$section

Confidence level to use when inferring whether the classification should be applied to a column or not (between 0 and 100). A number closer to 100 will yield fewer false positives but potentially more false negatives.
$$

$$section
### Profile Sample Type $(id="profileSampleType")
The sample type can be set to either:

* **Percentage**: this will use a percentage to sample the table (e.g. if the table has 100 rows and we set the sample percentage to 50%, the profiler will use 50 random rows to compute the metrics).
* **Row Count**: this will use a number of rows to sample the table (e.g. if the table has 100 rows and we set the row count to 10, the profiler will use 10 random rows to compute the metrics).
$$

$$section
### Profile Sample $(id="profileSample")
Percentage of data or number of rows to use when sampling tables to compute the profiler metrics. By default (i.e. if left blank), the profiler will run against the entire table.
$$

$$section
### Sampling Method Type $(id="samplingMethodType")

**This parameter is effective for Snowflake only.**

The sampling method type can be set to **BERNOULLI** or **SYSTEM**. You can find the difference between the two values in the Snowflake documentation. When you choose **BERNOULLI**, it will scan the full table even if a small value is set for **Profile Sample**; however, it has fewer restrictions than **SYSTEM**.

If no option is chosen, the default is **BERNOULLI**.
$$

$$section
### Sample Data Rows Count $(id="sampleDataCount")
Set the number of rows to ingest when the `Ingest Sample Data` toggle is on. Defaults to 50.
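As a toy illustration of the two sampling modes described above (plain Python, not the profiler's actual sampling implementation):

```python
import random

rows = list(range(1, 101))  # a hypothetical table with 100 rows

# Percentage mode: a 50% profile sample keeps 50 random rows
percentage = 50
percentage_sample = random.sample(rows, k=len(rows) * percentage // 100)

# Row Count mode: a row count of 10 keeps 10 random rows
row_count = 10
row_count_sample = random.sample(rows, k=row_count)

print(len(percentage_sample), len(row_count_sample))  # 50 10
```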
@@ -103,11 +103,6 @@ The sampling method type can be set to **BERNOULLI** or **SYSTEM**. You can find

If no option is chosen, the default is **BERNOULLI**.
$$

$$section
### Sample Data Rows Count $(id="sampleDataCount")
Set the number of rows to ingest when the `Ingest Sample Data` toggle is on. Defaults to 50.
$$

$$section
### Thread Count $(id="threadCount")
Number of threads that will be used when computing the profiler metrics. A high number can have a negative performance effect.