package org.eocencle.magnet.jsonbuilder.builder;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.eocencle.magnet.core.exception.UnsupportedException;
import org.eocencle.magnet.core.mapping.*;
import org.eocencle.magnet.core.util.CoreTag;
import org.eocencle.magnet.jsonbuilder.session.JsonProjectConfig;
import org.eocencle.magnet.jsonbuilder.util.JSONBuilderTag;
import java.util.Iterator;
/**
 * Workflow builder: parses the workflow section of a JSON project definition.
 * @author: huan
 * @Date: 2020-05-31
 * @Description:
 */
public class WorkFlowBuilder implements JSONParser {
// Singleton instance (eagerly initialized; safe because class loading is thread-safe)
private static WorkFlowBuilder BUILDER = new WorkFlowBuilder();
// Private constructor enforces the singleton pattern
private WorkFlowBuilder() {
}
/**
 * Returns the singleton builder instance.
 * @Author huan
 * @Date 2020-05-31
 * @Param []
 * @Return org.eocencle.magnet.jsonbuilder.builder.WorkFlowBuilder
 * @Exception
 * @Description shared stateless parser, safe to reuse across calls
 */
public static WorkFlowBuilder getInstance() {
return BUILDER;
}
/**
 * Parses the workflow section of the project JSON and dispatches each
 * component entry to the element parser matching its "ctype" attribute.
 *
 * @param parser the project definition; must be a fastjson {@link JSONObject}
 * @param config the project configuration that collects parsed work-flow infos
 * @throws UnsupportedException if a component declares an unknown type
 *         (this includes entries with a missing/null ctype)
 */
@Override
public void parse(Object parser, JsonProjectConfig config) {
    JSONObject root = (JSONObject) parser;
    JSONArray workFlowArray = root.getJSONArray(JSONBuilderTag.JSON_ATTR_WORKFLOW);
    // Robustness: a project without a workflow section has nothing to parse
    // (the original dereferenced the array unconditionally and threw an NPE).
    if (null == workFlowArray) {
        return;
    }
    for (Object element : workFlowArray) {
        JSONObject jsonObj = (JSONObject) element;
        String type = jsonObj.getString(JSONBuilderTag.JSON_ATTR_CTYPE);
        // Dispatch table by component type; a null type falls through every
        // equalsIgnoreCase and is rejected in the final else, as before.
        if (JSONBuilderTag.WORKFLOW_TYPE_SQL.equalsIgnoreCase(type)) {
            this.parseSQLElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_OUTPUT.equalsIgnoreCase(type)) {
            this.parseOutputElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_GROUP.equalsIgnoreCase(type)) {
            this.parseGroupElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_FILTER.equalsIgnoreCase(type)) {
            this.parseFilterElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_DISTINCT.equalsIgnoreCase(type)) {
            this.parseDistinctElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_ORDER.equalsIgnoreCase(type)) {
            this.parseOrderElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_UNION.equalsIgnoreCase(type)) {
            this.parseUnionElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_JOIN.equalsIgnoreCase(type)) {
            this.parseJoinElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_SCHEMA.equalsIgnoreCase(type)) {
            this.parseSchemaElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_VALUEMAPPERS.equalsIgnoreCase(type)) {
            this.parseValueMappersElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_SPLITFIELDTOROWS.equalsIgnoreCase(type)) {
            this.parseSplitFieldToRowsElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_STRINGCUTS.equalsIgnoreCase(type)) {
            this.parseStringCutsElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_ADDFIELDS.equalsIgnoreCase(type)) {
            this.parseAddFieldsElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_ADDSEQUENCE.equalsIgnoreCase(type)) {
            this.parseAddSequenceElements(jsonObj, config);
        } else if (JSONBuilderTag.WORKFLOW_TYPE_ROWNUM.equalsIgnoreCase(type)) {
            this.parseRowNumElements(jsonObj, config);
        } else {
            throw new UnsupportedException(type + " component is not supported");
        }
    }
}
/**
 * Parses a SQL component element into an {@code SQLInfo} and registers it
 * with the project configuration under its id.
 * @Author huan
 * @Date 2020-06-13
 * @Param [jsonObj, config]
 * @Return void
 * @Exception
 * @Description reads the id, alias and sql attributes of the element
 */
private void parseSQLElements(JSONObject jsonObj, JsonProjectConfig config) {
    SQLInfo info = new SQLInfo();
    info.setId(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ID));
    info.setAlias(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ALIAS));
    info.setSql(jsonObj.getString(JSONBuilderTag.JSON_ATTR_SQL));
    config.putWorkFlowInfo(info.getId(), info);
}
/**
 * Parses an output component element into an {@code OutputInfo} and registers
 * it with the project configuration under its id. Missing optional attributes
 * fall back to defaults: style=file, type=create, compress=none, and the
 * invisible separator.
 * @Author huan
 * @Date 2020-06-13
 * @Param [jsonObj, config]
 * @Return void
 * @Exception
 * @Description
 */
private void parseOutputElements(JSONObject jsonObj, JsonProjectConfig config) {
    OutputInfo info = new OutputInfo();
    info.setId(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ID));
    info.setRef(jsonObj.getString(JSONBuilderTag.JSON_ATTR_REF));
    // Each optional attribute keeps its explicit value when present,
    // otherwise the documented default is applied.
    String style = jsonObj.getString(JSONBuilderTag.JSON_ATTR_STYLE);
    info.setStyle(null == style ? JSONBuilderTag.OUTPUT_STYLE_FILE : style);
    info.setTarget(jsonObj.getString(JSONBuilderTag.JSON_ATTR_TARGET));
    String type = jsonObj.getString(JSONBuilderTag.JSON_ATTR_TYPE);
    info.setType(null == type ? JSONBuilderTag.OUTPUT_TYPE_CREATE : type);
    String compress = jsonObj.getString(JSONBuilderTag.JSON_ATTR_COMPRESS);
    info.setCompress(null == compress ? JSONBuilderTag.COMPRESS_NONE : compress);
    String separator = jsonObj.getString(JSONBuilderTag.JSON_ATTR_SEPARATOR);
    info.setSeparator(null == separator ? JSONBuilderTag.SPLIT_INVISIBLE1 : separator);
    config.putWorkFlowInfo(info.getId(), info);
}
/**
 * Parses a group component element into a {@code GroupInfo} and registers it
 * with the project configuration under its id. The comma-separated "field"
 * attribute yields the group fields; each comma-separated "order" entry is
 * split on blanks into a field name and an optional direction.
 * @Author huan
 * @Date 2020-06-13
 * @Param [jsonObj, config]
 * @Return void
 * @Exception
 * @Description
 */
private void parseGroupElements(JSONObject jsonObj, JsonProjectConfig config) {
    GroupInfo groupInfo = new GroupInfo();
    groupInfo.setId(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ID));
    groupInfo.setAlias(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ALIAS));
    groupInfo.setRef(jsonObj.getString(JSONBuilderTag.JSON_ATTR_REF));
    String field = jsonObj.getString(JSONBuilderTag.JSON_ATTR_FIELD);
    // Guard: the original dereferenced a missing "field" attribute and threw
    // an NPE; absent attribute now simply contributes no group fields.
    if (null != field) {
        // NOTE(review): fields are split with JSONBuilderTag.SPLIT_COMMA while
        // orders use CoreTag.SPLIT_COMMA — presumably equivalent; confirm.
        for (String val : field.trim().split(JSONBuilderTag.SPLIT_COMMA)) {
            groupInfo.addGroupField(new GroupInfo.GroupField(val.trim()));
        }
    }
    String order = jsonObj.getString(JSONBuilderTag.JSON_ATTR_ORDER);
    // Guard: same NPE risk for a missing "order" attribute.
    if (null != order) {
        for (String entry : order.split(CoreTag.SPLIT_COMMA)) {
            // "name direction" pair, or just "name" for the default direction
            String[] key = entry.trim().split(CoreTag.SPLIT_BLANK);
            if (2 == key.length) {
                groupInfo.addOrderField(new GroupInfo.OrderField(key[0].trim(), key[1].trim()));
            } else {
                groupInfo.addOrderField(new GroupInfo.OrderField(key[0].trim()));
            }
        }
    }
    groupInfo.setRownumField(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ROWNUM));
    groupInfo.setStreamState(jsonObj.getString(JSONBuilderTag.JSON_ATTR_STREAM_STATE));
    config.putWorkFlowInfo(groupInfo.getId(), groupInfo);
}
/**
* 解析过滤元素
* @Author huan
* @Date 2020-06-13
* @Param [jsonObj, config]
* @Return void
* @Exception
* @Description
*/
private void parseFilterElements(JSONObject jsonObj, JsonProjectConfig config) {
FilterInfo filterInfo = new FilterInfo();
filterInfo.setId(jsonObj.getString(JSONBuilderTag.JSON_ATTR_ID));
filterInfo.setAlias(jsonObj.getString(JSONBuild
没有合适的资源?快使用搜索试试~ 我知道了~
简单实用的分布式大数据处理框架,特点是零基础操作,支持批处理和流式处理
共331个文件
java:318个
xml:10个
dtd:2个
1.该资源内容由用户上传,如若侵权请联系客服进行举报
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
版权申诉
0 下载量 5 浏览量
2023-06-21
12:34:14
上传
评论
收藏 340KB ZIP 举报
温馨提示
简单实用的分布式大数据处理框架,特点是零基础操作,支持批处理和流式处理。项目目前由core、xmlbuilder、jsonbuilder、spark1、spark2、flink1和client七个模块组成。Magnet提供统一的配置接口,只需要配置一次就可以在任何引擎上处理大数据,真正做到“ 一次编写,到处执行 ”的效果。项目具备优秀的可扩展性,设计良好的接口可以兼容任何大数据引擎
资源推荐
资源详情
资源评论
收起资源包目录
简单实用的分布式大数据处理框架,特点是零基础操作,支持批处理和流式处理 (331个子文件)
magnet.dtd 15KB
datasource.dtd 631B
WorkFlowBuilder.java 22KB
SparkDefaultGrouper.java 16KB
SparkDefaultGrouper.java 16KB
ComponentFactory.java 11KB
Spark1ComponentFactory.java 10KB
CoreTag.java 10KB
Spark2ComponentFactory.java 10KB
WorkStageComponentBuilder.java 10KB
SparkUtil.java 9KB
SparkUtil.java 9KB
XPathParser.java 9KB
XNode.java 9KB
SQLScriptParser.java 9KB
Resources.java 7KB
VariableBuilder.java 7KB
XMLBuilderTag.java 7KB
SparkValueMappersWorkStage.java 7KB
JSONBuilderTag.java 7KB
SparkValueMappersWorkStage.java 7KB
KafkaOffsetSaveToMysql.java 6KB
Flink1ComponentFactory.java 6KB
KafkaOffsetSaveToMysql.java 6KB
ForEachSQLNode.java 6KB
SparkCollectExceptionWrapper.java 5KB
SparkCollectExceptionWrapper.java 5KB
WorkStageComposite.java 5KB
DataSourceBuilder.java 5KB
ClassLoaderWrapper.java 5KB
QueryBuilder.java 4KB
SchemaTupleFactory.java 4KB
SchemaTupleFactory.java 4KB
SparkStreamDataLoadWorkStage.java 4KB
SparkStreamDataLoadWorkStage.java 4KB
Spark1Context.java 4KB
SparkDefaultFilterCondition.java 4KB
SparkStreamWorkStage.java 4KB
SparkStreamWorkStage.java 4KB
SparkDefaultFilterCondition.java 4KB
SparkDataBaseTableLoader.java 4KB
KafkaOffsetDefaultManager.java 4KB
SparkStringCutsDefaultHandler.java 4KB
KafkaOffsetDefaultManager.java 4KB
SparkStringCutsDefaultHandler.java 4KB
SparkSampleOutputer.java 4KB
SparkSampleOutputer.java 4KB
TrimSQLNode.java 4KB
ParameterBuilder.java 4KB
SparkDistinctDefaultHandler.java 3KB
SparkDistinctDefaultHandler.java 3KB
ParameterReplaceWrapper.java 3KB
EmailUtil.java 3KB
XMLConfigurationBuilder.java 3KB
SparkDataBaseTableLoader.java 3KB
Context.java 3KB
ProjectConfig.java 3KB
JSONConfigurationBuilder.java 3KB
GroupInfo.java 3KB
Spark2Context.java 3KB
FilterBuilder.java 3KB
SparkFileOutputer.java 3KB
SparkFileOutputer.java 3KB
SparkDataBaseOutputer.java 3KB
SparkDataBaseOutputer.java 3KB
JoinBuilder.java 3KB
FileTableBuilder.java 3KB
SparkDefaultSplitFieldToRowsHandler.java 3KB
SparkDefaultSplitFieldToRowsHandler.java 3KB
StreamBuilder.java 3KB
GroupBuilder.java 3KB
DynamicContext.java 3KB
OutputBuilder.java 3KB
ValueMappersBuilder.java 3KB
AddFieldsBuilder.java 3KB
WorkFlowBuilder.java 3KB
SparkSQLTableRegisterWrapper.java 3KB
SparkSQLTableRegisterWrapper.java 3KB
SparkAddFieldsDefaultHandler.java 3KB
BranchBuilder.java 3KB
SplitFieldToRowsBuilder.java 2KB
WorkStageComponentWrapper.java 2KB
DistinctBuilder.java 2KB
SparkAddFieldsDefaultHandler.java 2KB
StringCutsBuilder.java 2KB
WrapperManager.java 2KB
DataBaseTableBuilder.java 2KB
SparkJsonTableLoader.java 2KB
SparkTableWorkStage.java 2KB
SparkTableWorkStage.java 2KB
DefaultTableBuilder.java 2KB
CollectResultWrapper.java 2KB
SparkJsonTableLoader.java 2KB
ValueMappersInfo.java 2KB
ProcessModeValidation.java 2KB
AddSequenceBuilder.java 2KB
SQLScriptParserWrapper.java 2KB
XMLIncludeTransformer.java 2KB
StrictMap.java 2KB
SparkSQLWorkStage.java 2KB
共 331 条
- 1
- 2
- 3
- 4
资源评论
Java程序员-张凯
- 粉丝: 1w+
- 资源: 6732
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功