{
  "urn:li:dataFlow:(spark,PythonHdfsIn2HiveCreateTable,spark_spark-master_7077)": {
    "value": {
      "com.linkedin.metadata.snapshot.DataFlowSnapshot": {
        "urn": "urn:li:dataFlow:(spark,PythonHdfsIn2HiveCreateTable,spark_spark-master_7077)",
        "aspects": [
          {
            "com.linkedin.metadata.key.DataFlowKey": {
              "orchestrator": "spark",
              "cluster": "spark_spark-master_7077",
              "flowId": "PythonHdfsIn2HiveCreateTable"
            }
          },
          {
            "com.linkedin.datajob.DataFlowInfo": {
              "name": "PythonHdfsIn2HiveCreateTable",
              "customProperties": {
                "sparkUser": "root",
                "appName": "PythonHdfsIn2HiveCreateTable"
              }
            }
          },
          {
            "com.linkedin.common.BrowsePaths": {
              "paths": [
                "/spark/spark_spark-master_7077"
              ]
            }
          },
          {
            "com.linkedin.common.DataPlatformInstance": {
              "platform": "urn:li:dataPlatform:spark"
            }
          }
        ]
      }
    }
  },
  "urn:li:dataJob:(urn:li:dataFlow:(spark,PythonHdfsIn2HiveCreateTable,spark_spark-master_7077),QueryExecId_5)": {
    "value": {
      "com.linkedin.metadata.snapshot.DataJobSnapshot": {
        "urn": "urn:li:dataJob:(urn:li:dataFlow:(spark,PythonHdfsIn2HiveCreateTable,spark_spark-master_7077),QueryExecId_5)",
        "aspects": [
          {
            "com.linkedin.metadata.key.DataJobKey": {
              "jobId": "QueryExecId_5",
              "flow": "urn:li:dataFlow:(spark,PythonHdfsIn2HiveCreateTable,spark_spark-master_7077)"
            }
          },
          {
            "com.linkedin.datajob.DataJobInfo": {
              "name": "saveAsTable at NativeMethodAccessorImpl.java:0",
              "type": {
                "string": "sparkJob"
              },
              "customProperties": {
                "SQLQueryId": "5",
                "appName": "PythonHdfsIn2HiveCreateTable",
                "description": "saveAsTable at NativeMethodAccessorImpl.java:0",
                "queryPlan": "CreateDataSourceTableAsSelectCommand `PythonHdfsIn2HiveCreateTable`.`foo3`, Overwrite, [a, b, c, d]\n+- Project [a#16, b#20, c#40, d#44]\n +- Join Inner, (id#10 = id#34)\n :- Project [id#10, c1#11 AS a#16, c2#12 AS b#20]\n : +- Filter isnotnull(id#10)\n : +- Relation[id#10,c1#11,c2#12] csv\n +- Project [id#34, c1#35 AS c#40, c2#36 AS d#44]\n +- Filter isnotnull(id#34)\n +- Relation[id#34,c1#35,c2#36] csv\n"
              }
            }
          },
          {
            "com.linkedin.common.DataPlatformInstance": {
              "platform": "urn:li:dataPlatform:spark"
            }
          },
          {
            "com.linkedin.common.BrowsePaths": {
              "paths": [
                "/spark/spark_spark-master_7077"
              ]
            }
          },
          {
            "com.linkedin.datajob.DataJobInputOutput": {
              "inputDatasets": [
                "urn:li:dataset:(urn:li:dataPlatform:file,file:/opt/workspace/resources/data/in1.csv,PROD)",
                "urn:li:dataset:(urn:li:dataPlatform:file,file:/opt/workspace/resources/data/in2.csv,PROD)"
              ],
              "outputDatasets": [
                "urn:li:dataset:(urn:li:dataPlatform:hive,PythonHdfsIn2HiveCreateTable.foo3,PROD)"
              ]
            }
          }
        ]
      }
    }
  }
}