Mirror of https://github.com/datahub-project/datahub.git, synced 2025-10-13 09:54:10 +00:00
Add confidential and recursive column to dict_dataset_field
This commit is contained in:
parent  d46a9d8b8e
commit  7cbda15b5a
@@ -590,11 +590,12 @@ public class DatasetInfoDao {
     String isActive = "N";
     if (ownerInfo.containsKey("app_id")) {
       appId = StringUtil.toInt(ownerInfo.get("app_id"));
-      isActive = appId == 301 ? "Y" : appId == 300 ? StringUtil.toStr(ownerInfo.get("is_active")) : "N";
+      isActive = appId == 301 ? "Y" : appId == 300 ? (String) ownerInfo.get("is_active") : "N";
     }
     record.setAppId(appId);
     record.setIsActive(isActive);
-    record.setIsGroup(record.getOwnerType().equalsIgnoreCase("group") ? "Y" : "N");
+    String ownerTypeString = record.getOwnerType();
+    record.setIsGroup(ownerTypeString != null && ownerTypeString.equalsIgnoreCase("group") ? "Y" : "N");
     sortId++;
     record.setSortId(sortId);
 
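The isGroup change in the hunk above adds a null guard before calling equalsIgnoreCase. A minimal standalone sketch of why that matters (the class and variable names here are illustrative, not the DAO's API):

public class IsGroupNullGuardSketch {
  public static void main(String[] args) {
    String ownerType = null;  // an owner row with no owner type set
    // The old one-liner would throw NullPointerException here:
    //   ownerType.equalsIgnoreCase("group") ? "Y" : "N"
    // The null-guarded form falls back to "N" instead:
    String isGroup = ownerType != null && ownerType.equalsIgnoreCase("group") ? "Y" : "N";
    System.out.println(isGroup);  // prints N
  }
}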
@@ -612,17 +613,16 @@ public class DatasetInfoDao {
     for (DatasetOwnerRecord rec : ownerList) {
       for (Map<String, Object> old : oldOwnerList) {
         if (rec.getDatasetId().equals(StringUtil.toInt(old.get("dataset_id"))) && rec.getOwner()
-            .equals(StringUtil.toStr(old.get("owner_id"))) && rec.getAppId()
-            .equals(StringUtil.toInt(old.get("app_id")))) {
-          rec.setDbIds(StringUtil.toStr(old.get("db_ids")));
+            .equals(old.get("owner_id")) && rec.getAppId().equals(StringUtil.toInt(old.get("app_id")))) {
+          rec.setDbIds((String) old.get("db_ids"));
           rec.setCreatedTime(StringUtil.toLong(old.get("created_time")));
 
           // take the higher priority owner category
           rec.setOwnerCategory(
-              OwnerType.chooseOwnerType(rec.getOwnerCategory(), StringUtil.toStr(old.get("owner_type"))));
+              OwnerType.chooseOwnerType(rec.getOwnerCategory(), (String) old.get("owner_type")));
 
           // merge owner source as comma separated list
-          rec.setOwnerSource(mergeOwnerSource(rec.getOwnerSource(), StringUtil.toStr(old.get("owner_source"))));
+          rec.setOwnerSource(mergeOwnerSource(rec.getOwnerSource(), (String) old.get("owner_source")));
 
           // remove from owner source?
         }
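The hunk above keeps calling mergeOwnerSource to combine owner sources into a comma-separated list. The helper itself is not shown in this diff; the sketch below is only an assumption about what such a merge could look like, dropping duplicates and preserving order:

import java.util.LinkedHashSet;
import java.util.Set;

public class MergeOwnerSourceSketch {
  // Hypothetical helper: combines two comma-separated source lists, dropping duplicates.
  static String mergeOwnerSource(String current, String old) {
    if (current == null || current.isEmpty()) return old;
    if (old == null || old.isEmpty()) return current;
    Set<String> merged = new LinkedHashSet<>();
    for (String s : (current + "," + old).split(",")) {
      merged.add(s.trim());
    }
    return String.join(",", merged);
  }

  public static void main(String[] args) {
    System.out.println(mergeOwnerSource("SCM", "JIRA,SCM"));  // prints SCM,JIRA
  }
}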
@@ -853,21 +853,18 @@ public class DatasetInfoDao {
       field.setDatasetId(datasetId);
       String fieldPath = field.getFieldPath();
       int lastIndex = fieldPath.lastIndexOf('.'); // if not found, index = -1
-      if (lastIndex >= 0) {
-        field.setParentPath(fieldPath.substring(0, lastIndex));
-      }
       field.setFieldName(fieldPath.substring(lastIndex + 1));
+      field.setParentPath(lastIndex > 0 ? fieldPath.substring(0, lastIndex) : "");
 
       // merge old info into updated list
       for (Map<String, Object> old : oldInfo) {
-        if (field.getDatasetId().equals(StringUtil.toInt(old.get("dataset_id"))) && field.getFieldName()
-            .equals(StringUtil.toStr(old.get("field_name"))) && field.getParentPath()
-            .equals(StringUtil.toStr(old.get("parent_path")))) {
-          field.setNamespace(StringUtil.toStr(old.get("namespace")));
-          field.setCommentIds(StringUtil.toStr(old.get("comment_ids")));
+        if (datasetId.equals(StringUtil.toInt(old.get("dataset_id"))) && field.getFieldName()
+            .equals(old.get("field_name")) && field.getParentPath().equals(old.get("parent_path"))) {
+          field.setNamespace((String) old.get("namespace"));
+          field.setCommentIds((String) old.get("comment_ids"));
           field.setDefaultCommentId(StringUtil.toInt(old.get("default_comment_id")));
-          field.setPartitioned(StringUtil.toStr(old.get("is_partitioned")).equals("Y"));
-          field.setPartitioned(StringUtil.toStr(old.get("is_indexed")).equals("Y"));
+          field.setPartitioned("Y".equals(old.get("is_partitioned")));
+          field.setIndexed("Y".equals(old.get("is_indexed")));
         }
       }
     }
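The reworked path handling above takes the field name from the segment after the last dot and gives root-level fields an empty parent path. A quick standalone illustration of that split (not the DAO code itself):

public class FieldPathSplitSketch {
  public static void main(String[] args) {
    for (String fieldPath : new String[]{"address.street.name", "id"}) {
      int lastIndex = fieldPath.lastIndexOf('.');              // -1 when there is no dot
      String fieldName = fieldPath.substring(lastIndex + 1);   // whole path when lastIndex == -1
      String parentPath = lastIndex > 0 ? fieldPath.substring(0, lastIndex) : "";
      System.out.println(fieldName + " | " + parentPath);      // "name | address.street", then "id | "
    }
  }
}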
@@ -180,6 +180,8 @@ CREATE TABLE `dict_field_detail` (
     COMMENT 'only in RDBMS',
   `is_distributed`     TINYINT(4)   NULL
     COMMENT 'only in RDBMS',
+  `is_recursive`       CHAR(1)      NULL,
+  `confidential_flag`  VARCHAR(200) NULL,
   `default_value`      VARCHAR(200) NULL,
   `namespace`          VARCHAR(200) NULL,
   `java_data_type`     VARCHAR(50)  NULL
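For a database created before this change, the two new columns would also have to be added to the existing dict_field_detail table. The JDBC sketch below is only an illustration of that step, with a hypothetical connection URL and credentials; the project may provide its own migration path:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AddDictFieldDetailColumnsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical JDBC URL, user, and password; adjust to the actual deployment.
    try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/wherehows", "user", "pass");
         Statement stmt = conn.createStatement()) {
      // Column names and types taken from the DDL change above.
      stmt.executeUpdate("ALTER TABLE dict_field_detail ADD COLUMN is_recursive CHAR(1) NULL");
      stmt.executeUpdate("ALTER TABLE dict_field_detail ADD COLUMN confidential_flag VARCHAR(200) NULL");
    }
  }
}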
@@ -220,7 +222,7 @@ CREATE TABLE `stg_dict_dataset_field_comment` (
   `dataset_id` int(11) UNSIGNED NOT NULL,
   `db_id` smallint(6) unsigned NOT NULL DEFAULT '0',
   PRIMARY KEY (`field_id`,`comment_id`, `db_id`)
-) ENGINE=InnoDB
+) ENGINE=InnoDB
   DEFAULT CHARSET=utf8
   PARTITION BY HASH(db_id)
   PARTITIONS 8
@@ -273,7 +275,7 @@ CREATE TABLE `field_comments` (
   DEFAULT CHARSET = utf8;
 
 -- dict_dataset_instance
-CREATE TABLE dict_dataset_instance (
+CREATE TABLE dict_dataset_instance (
   dataset_id int(11) UNSIGNED NOT NULL,
   db_id smallint(6) UNSIGNED COMMENT 'FK to cfg_database' NOT NULL DEFAULT '0',
   deployment_tier enum('local','grid','dev','int','ei','ei2','ei3','qa','stg','prod') NOT NULL DEFAULT 'dev',
@@ -307,15 +309,15 @@ AUTO_INCREMENT = 0
   PARTITION p6,
   PARTITION p7);
 
-CREATE INDEX logical_name USING BTREE
+CREATE INDEX logical_name USING BTREE
   ON dict_dataset_instance(logical_name);
-CREATE INDEX server_cluster USING BTREE
+CREATE INDEX server_cluster USING BTREE
   ON dict_dataset_instance(server_cluster, deployment_tier, data_center, slice);
-CREATE INDEX native_name USING BTREE
+CREATE INDEX native_name USING BTREE
   ON dict_dataset_instance(native_name);
 
 
-CREATE TABLE stg_dict_dataset_instance (
+CREATE TABLE stg_dict_dataset_instance (
   dataset_urn varchar(200) NOT NULL,
   db_id smallint(6) UNSIGNED NOT NULL DEFAULT '0',
   deployment_tier enum('local','grid','dev','int','ei','ei2','ei3','qa','stg','prod') NOT NULL DEFAULT 'dev',
@@ -348,6 +350,6 @@ AUTO_INCREMENT = 0
   PARTITION p5,
   PARTITION p6,
   PARTITION p7);
-CREATE INDEX server_cluster USING BTREE
+CREATE INDEX server_cluster USING BTREE
   ON stg_dict_dataset_instance(server_cluster, deployment_tier, data_center, slice);
 
@@ -63,13 +63,15 @@ public class DatasetFieldSchemaRecord extends AbstractRecord {
   public String[] getFieldDetailColumns() {
     return new String[]{"dataset_id", "sort_id", "parent_sort_id", "parent_path", "field_name", "fields_layout_id",
         "field_label", "data_type", "data_size", "data_precision", "data_fraction", "is_nullable", "is_indexed",
-        "is_partitioned", "default_value", "namespace", "default_comment_id", "comment_ids"};
+        "is_partitioned", "is_recursive", "confidential_flags", "default_value", "namespace", "default_comment_id",
+        "comment_ids"};
   }
 
   public Object[] getFieldDetailValues() {
     return new Object[]{datasetId, position, parentFieldPosition, parentPath, fieldName, 0, label, type, maxCharLength,
         precision, scale, nullable != null && nullable ? "Y" : "N", indexed != null && indexed ? "Y" : "N",
-        partitioned != null && partitioned ? "Y" : "N", defaultValue, namespace, defaultCommentId, commentIds};
+        partitioned != null && partitioned ? "Y" : "N", isRecursive != null && isRecursive ? "Y" : "N",
+        confidentialFlags, defaultValue, namespace, defaultCommentId, commentIds};
   }
 
   public DatasetFieldSchemaRecord() {
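The column and value arrays above are positional: every new column name needs a value in the same slot, which is why is_recursive and confidential_flags appear at matching positions in both methods. A rough sketch of how such paired arrays are typically turned into a parameterized INSERT (not the project's persistence code; the sample values are illustrative):

import java.util.Arrays;
import java.util.Collections;

public class FieldDetailInsertSketch {
  public static void main(String[] args) {
    // Shortened lists for illustration; the record class pairs every column with a value positionally.
    String[] columns = {"dataset_id", "field_name", "is_recursive", "confidential_flags"};
    Object[] values = {42, "member_ssn", "Y", "HIGHLY_CONFIDENTIAL"};

    String placeholders = String.join(", ", Collections.nCopies(columns.length, "?"));
    String sql = "INSERT INTO dict_field_detail (" + String.join(", ", columns) + ") VALUES (" + placeholders + ")";
    System.out.println(sql);
    System.out.println(Arrays.toString(values));  // bound positionally, e.g. via PreparedStatement.setObject
  }
}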