OpenMetadata/bootstrap/sql/org.postgresql.Driver/v009__create_db_connection_info.sql
-- Unique constraint for user email address
ALTER TABLE user_entity
ADD UNIQUE (email);
-- Remove classificationName in BigQuery
UPDATE dbservice_entity SET json = json #- '{connection,config,classificationName}' where serviceType in ('BigQuery');
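-- Note: the jsonb #- text[] operator deletes the field at the given path, here connection.config.classificationName;
-- the same pattern is reused throughout this migration.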
-- migrate ingestAllDatabases in postgres
UPDATE dbservice_entity de2
SET json = JSONB_SET(
        json || JSONB_SET(json, '{connection,config}', json #> '{connection,config}' ||
            jsonb_build_object('database',
                (SELECT json ->> 'name'
                   FROM database_entity de
                  WHERE id = (SELECT er.toId
                                FROM entity_relationship er
                               WHERE er.fromId = de2.id
                                 AND er.toEntity = 'database'
                               LIMIT 1)
                )
            )),
        '{connection,config,ingestAllDatabases}',
        'true'::jsonb
    )
WHERE de2.serviceType = 'Postgres'
  AND json #>> '{connection,config,database}' IS NULL;
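-- e.g. a hypothetical {"connection": {"config": {"hostPort": "db:5432"}}} config now also carries
-- "database": "<name of the first related database>" and "ingestAllDatabases": true.
-- New entity table backing the container-based storage services that replace locations (see the location cleanup below).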
CREATE TABLE IF NOT EXISTS storage_container_entity (
    id VARCHAR(36) GENERATED ALWAYS AS (json ->> 'id') STORED NOT NULL,
    fullyQualifiedName VARCHAR(256) GENERATED ALWAYS AS (json ->> 'fullyQualifiedName') STORED NOT NULL,
    json JSONB NOT NULL,
    updatedAt BIGINT GENERATED ALWAYS AS ((json ->> 'updatedAt')::bigint) STORED NOT NULL,
    updatedBy VARCHAR(256) GENERATED ALWAYS AS (json ->> 'updatedBy') STORED NOT NULL,
    deleted BOOLEAN GENERATED ALWAYS AS ((json ->> 'deleted')::boolean) STORED,
    PRIMARY KEY (id),
    UNIQUE (fullyQualifiedName)
);
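-- Per-connector test connection definitions, likely consumed by the automations workflows created below.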
CREATE TABLE IF NOT EXISTS test_connection_definition (
    id VARCHAR(36) GENERATED ALWAYS AS (json ->> 'id') STORED NOT NULL,
    name VARCHAR(256) GENERATED ALWAYS AS (json ->> 'name') STORED NOT NULL,
    json JSONB NOT NULL,
    updatedAt BIGINT GENERATED ALWAYS AS ((json ->> 'updatedAt')::bigint) STORED NOT NULL,
    updatedBy VARCHAR(256) GENERATED ALWAYS AS (json ->> 'updatedBy') STORED NOT NULL,
    deleted BOOLEAN GENERATED ALWAYS AS ((json ->> 'deleted')::boolean) STORED,
    UNIQUE (name)
);
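-- Automations workflows (e.g. test connection runs); workflowType and status are generated from the JSON payload.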
CREATE TABLE IF NOT EXISTS automations_workflow (
    id VARCHAR(36) GENERATED ALWAYS AS (json ->> 'id') STORED NOT NULL,
    name VARCHAR(256) GENERATED ALWAYS AS (json ->> 'name') STORED NOT NULL,
    workflowType VARCHAR(256) GENERATED ALWAYS AS (json ->> 'workflowType') STORED NOT NULL,
    status VARCHAR(256) GENERATED ALWAYS AS (json ->> 'status') STORED,
    json JSONB NOT NULL,
    updatedAt BIGINT GENERATED ALWAYS AS ((json ->> 'updatedAt')::bigint) STORED NOT NULL,
    updatedBy VARCHAR(256) GENERATED ALWAYS AS (json ->> 'updatedBy') STORED NOT NULL,
    deleted BOOLEAN GENERATED ALWAYS AS ((json ->> 'deleted')::boolean) STORED,
    PRIMARY KEY (id),
    UNIQUE (name)
);
-- Do not store OM server connection, we'll set it dynamically on the resource
UPDATE ingestion_pipeline_entity
SET json = json::jsonb #- '{openMetadataServerConnection}';
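-- Queries become first-class entities; existing table.tableQueries extensions are migrated into this table below.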
CREATE TABLE IF NOT EXISTS query_entity (
    id VARCHAR(36) GENERATED ALWAYS AS (json ->> 'id') STORED NOT NULL,
    name VARCHAR(256) GENERATED ALWAYS AS (json ->> 'name') STORED NOT NULL,
    json JSONB NOT NULL,
    updatedAt BIGINT GENERATED ALWAYS AS ((json ->> 'updatedAt')::bigint) STORED NOT NULL,
    updatedBy VARCHAR(256) GENERATED ALWAYS AS (json ->> 'updatedBy') STORED NOT NULL,
    deleted BOOLEAN GENERATED ALWAYS AS ((json ->> 'deleted')::boolean) STORED,
    UNIQUE (name)
);
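-- Temporary staging table for the query migration; it is dropped once the data has been copied.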
CREATE TABLE IF NOT EXISTS temp_query_migration (
    tableId VARCHAR(36) NOT NULL,
    queryId VARCHAR(36) GENERATED ALWAYS AS (json ->> 'id') STORED NOT NULL,
    json JSONB NOT NULL
);
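-- gen_random_uuid() below is provided by pgcrypto on PostgreSQL versions older than 13 (it is built in from 13 onwards).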
CREATE EXTENSION IF NOT EXISTS pgcrypto;
INSERT INTO temp_query_migration(tableId, json)
SELECT id,
       json_build_object('id', gen_random_uuid(), 'vote', vote, 'query', query, 'users', users,
                         'checksum', checksum, 'duration', duration, 'name', checksum,
                         'updatedAt', floor(EXTRACT(EPOCH FROM NOW())), 'updatedBy', 'admin', 'deleted', false) AS json
FROM entity_extension AS ee,
     jsonb_to_recordset(ee.json) AS x (vote decimal, query varchar, users json, checksum varchar, duration decimal, queryDate varchar)
WHERE ee.extension = 'table.tableQueries';
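-- At this point each staged row pairs the owning table id with a freshly built query document, roughly:
-- {"id": "<uuid>", "name": "<checksum>", "query": "...", "vote": ..., "users": [...], "checksum": "<checksum>",
--  "duration": ..., "updatedAt": <epoch seconds>, "updatedBy": "admin", "deleted": false}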
INSERT INTO query_entity(json)
SELECT json FROM temp_query_migration;
INSERT INTO entity_relationship(fromId,toId,fromEntity,toEntity,relation)
SELECT tableId,queryId,'table','query',10 FROM temp_query_migration;
DELETE FROM entity_extension
WHERE id IN (SELECT DISTINCT tableId FROM temp_query_migration)
  AND extension = 'table.tableQueries';
DROP TABLE temp_query_migration;
-- remove the audience if it was wrongfully sent from the UI after editing the OM service
UPDATE metadata_service_entity
SET json = json::jsonb #- '{connection,config,securityConfig,audience}'
WHERE name = 'OpenMetadata'
AND json#>'{connection,config,authProvider}' IS NOT NULL
AND json -> 'connection' -> 'config' ->> 'authProvider' != 'google';
ALTER TABLE user_tokens ALTER COLUMN expiryDate DROP NOT NULL;
DELETE FROM alert_entity;
DROP TABLE alert_action_def;
ALTER TABLE alert_entity RENAME TO event_subscription_entity;
-- create data model table
CREATE TABLE IF NOT EXISTS dashboard_data_model_entity (
    id VARCHAR(36) GENERATED ALWAYS AS (json ->> 'id') STORED NOT NULL,
    json JSONB NOT NULL,
    updatedAt BIGINT GENERATED ALWAYS AS ((json ->> 'updatedAt')::bigint) STORED NOT NULL,
    updatedBy VARCHAR(256) GENERATED ALWAYS AS (json ->> 'updatedBy') STORED NOT NULL,
    deleted BOOLEAN GENERATED ALWAYS AS ((json ->> 'deleted')::boolean) STORED,
    fullyQualifiedName VARCHAR(256) GENERATED ALWAYS AS (json ->> 'fullyQualifiedName') STORED NOT NULL,
    PRIMARY KEY (id),
    UNIQUE (fullyQualifiedName)
);
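-- Rename the Druid connection field 'database' to 'databaseName'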
UPDATE dbservice_entity
SET json = jsonb_set(json::jsonb #- '{connection,config,database}', '{connection,config,databaseName}', json #> '{connection,config,database}', true)
WHERE serviceType = 'Druid'
  AND json #> '{connection,config,database}' IS NOT NULL;
-- We were using the same jsonSchema for Pipeline Services and Ingestion Pipeline status
-- Also, we relied on the extension to store the run id
UPDATE entity_extension_time_series
SET jsonSchema = 'ingestionPipelineStatus', extension = 'ingestionPipeline.pipelineStatus'
WHERE jsonSchema = 'pipelineStatus' AND extension <> 'pipeline.PipelineStatus';
-- We are refactoring the storage service with containers. We'll remove the locations
DROP TABLE location_entity;
UPDATE dbservice_entity
SET json = json::jsonb #- '{connection,config,storageServiceName}'
WHERE servicetype = 'Glue';
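-- Remove the 'tables' field from chart entities; it is no longer stored in the chart JSON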
UPDATE chart_entity
SET json = json::jsonb #- '{tables}';