elastic-job作業(yè)相關的數(shù)據(jù)都是配置在zk上的,包括分片參數(shù)、作業(yè)失效轉移、運行實例等都保存在ZK上。那么具體的zk節(jié)點的樹形結構是什么樣子?每一個節(jié)點又是什么時候注冊到zk上的?

Job.png
在job的啟動過程中(JobScheduler.init()),會將啟動信息注冊到注冊中心。再看一下具體的節(jié)點信息:
/**
 * Initialize the job: persist its configuration to the registry center,
 * register the job instance, then hand it to the Quartz scheduler.
 *
 * ZK nodes touched during startup:
 *   /{jobName}/config                      (written by updateJobConfiguration)
 *   /{jobName}/leader/election/latch
 *   /{jobName}/leader/election/instance
 *   /{jobName}/servers/{serverIp}
 *   /{jobName}/instances/{instanceId}
 *   /{jobName}/sharding/necessary          (all created in registerStartUpInfo)
 */
public void init() {
    // Persist /{jobName}/config and read back the effective configuration.
    LiteJobConfiguration regCenterConfig = schedulerFacade.updateJobConfiguration(liteJobConfig);
    JobRegistry.getInstance().setCurrentShardingTotalCount(
            regCenterConfig.getJobName(),
            regCenterConfig.getTypeConfig().getCoreConfig().getShardingTotalCount());
    JobScheduleController scheduleController = new JobScheduleController(
            createScheduler(),
            createJobDetail(regCenterConfig.getTypeConfig().getJobClass()),
            regCenterConfig.getJobName());
    JobRegistry.getInstance().registerJob(regCenterConfig.getJobName(), scheduleController, regCenter);
    // Registers leader election, server/instance nodes and the resharding flag (see node list above).
    schedulerFacade.registerStartUpInfo(!regCenterConfig.isDisabled());
    scheduleController.scheduleJob(regCenterConfig.getTypeConfig().getCoreConfig().getCron());
}
持久化job的配置信息:首先將job的配置信息持久化到zk節(jié)點上,看代碼:
LiteJobConfiguration liteJobConfigFromRegCenter=schedulerFacade.updateJobConfiguration(liteJobConfig);
/**
 * Persist the job configuration to the registry center, then reload the
 * configuration actually stored there (which may differ from the local one
 * when overwrite is disabled).
 *
 * @param liteJobConfig local job configuration to persist
 * @return the configuration effective in the registry center
 */
public LiteJobConfiguration updateJobConfiguration(final LiteJobConfiguration liteJobConfig) {
    // Writes /{jobName}/config (only when absent or overwrite=true, see ConfigService.persist).
    configService.persist(liteJobConfig);
    // load(false) — presumably a non-cached read; confirm against ConfigService.
    return configService.load(false);
}
/**
 * Persist the job configuration node /{jobName}/config.
 * The node is only (re)written when it does not exist yet, or when the
 * local configuration is flagged with overwrite=true.
 *
 * @param liteJobConfig job configuration to store as JSON
 */
public void persist(final LiteJobConfiguration liteJobConfig) {
    checkConflictJob(liteJobConfig);
    boolean configMissing = !jobNodeStorage.isJobNodeExisted(ConfigurationNode.ROOT);
    if (configMissing || liteJobConfig.isOverwrite()) {
        jobNodeStorage.replaceJobNode(ConfigurationNode.ROOT, LiteJobConfigurationGsonFactory.toJson(liteJobConfig));
    }
}
/**
 * Write (create or overwrite) a job node with the given value.
 * For the configuration this is where /{jobName}/config gets registered.
 *
 * @param node  node path relative to /{jobName}
 * @param value payload to store; its toString() form is persisted
 */
public void replaceJobNode(final String node, final Object value) {
    final String fullPath = jobNodePath.getFullPath(node);
    regCenter.persist(fullPath, value.toString());
}
在job啟動注冊啟動信息的時候,會注冊很多信息,具體如下:
//JobScheduler.init();
schedulerFacade.registerStartUpInfo(!liteJobConfigFromRegCenter.isDisabled());
/**
 * Register all job start-up information in the registry center.
 *
 * @param enabled whether the job starts in the enabled state
 */
public void registerStartUpInfo(final boolean enabled) {
listenerManager.startAllListeners();
/**
Leader election; creates the nodes:
/{jobName}/leader/election/latch
/{jobName}/leader/election/instance
**/
leaderService.electLeader();
/**
Creates the server node:
/{jobName}/servers/{serverIp}
**/
serverService.persistOnline(enabled);
/**
Creates the instance node:
/{jobName}/instances/{instanceId}
**/
instanceService.persistOnline();
/**
Creates the resharding flag node:
/{jobName}/sharding/necessary
**/
shardingService.setReshardingFlag();
monitorService.listen();
// Start the reconcile background service only once.
if (!reconcileService.isRunning()) {
reconcileService.startAsync();
}
}
/**
 * Elect the leader instance.
 * Runs a leader election on LeaderNode.LATCH (/{jobName}/leader/election/latch);
 * the winning instance's callback registers /{jobName}/leader/election/instance.
 */
public void electLeader() {
log.debug("Elect a new leader now.");
jobNodeStorage.executeInLeader(LeaderNode.LATCH, new LeaderElectionExecutionCallback());
log.debug("Leader election completed.");
}
/**
 * Run the given callback while holding leadership on the latch node.
 * Blocks until this instance acquires the Curator LeaderLatch, then executes
 * the callback; the latch is released by try-with-resources.
 *
 * @param latchNode latch node path relative to /{jobName}
 * @param callback  work to perform once leadership is acquired
 */
public void executeInLeader(final String latchNode, final LeaderExecutionCallback callback) {
try (LeaderLatch latch = new LeaderLatch(getClient(), jobNodePath.getFullPath(latchNode))) {
latch.start();
latch.await();
// Executed only after leadership has been acquired (e.g. registers the leader instance).
callback.execute();
//CHECKSTYLE:OFF
} catch (final Exception ex) {
//CHECKSTYLE:ON
handleException(ex);
}
}
/**
 * Invoked after the leader latch has been acquired: if no leader is
 * registered yet, writes this instance's id to the ephemeral node
 * /{jobName}/leader/election/instance.
 */
@RequiredArgsConstructor
class LeaderElectionExecutionCallback implements LeaderExecutionCallback {

    @Override
    public void execute() {
        if (hasLeader()) {
            // Another instance already registered itself as leader; nothing to do.
            return;
        }
        String instanceId = JobRegistry.getInstance().getJobInstance(jobName).getJobInstanceId();
        jobNodeStorage.fillEphemeralJobNode(LeaderNode.INSTANCE, instanceId);
    }
}
再看一下執(zhí)行過程,最重要的一段獲取分片上下文,在獲取分片上下文的時候,首先會判斷是不是需要重新分片,需要分片的話,重新設置分片信息,在這里會做所有相關分片的邏輯。
//AbstractElasticJobExecutor 獲取上下文
ShardingContexts shardingContexts = jobFacade.getShardingContexts();
/**
 * Build the sharding contexts for this execution.
 * Failover items grabbed by this instance take priority; otherwise the job
 * is resharded when necessary and the locally assigned items are used, minus
 * items taken over by failover and disabled items.
 */
public ShardingContexts getShardingContexts() {
boolean isFailover = configService.load(true).isFailover();
if (isFailover) {
// Failover items assigned to this instance are executed first, exclusively.
List<Integer> failoverShardingItems = failoverService.getLocalFailoverItems();
if (!failoverShardingItems.isEmpty()) {
return executionContextService.getJobShardingContext(failoverShardingItems);
}
}
// Reshard if the resharding flag is set — all sharding logic happens in here.
shardingService.shardingIfNecessary();
List<Integer> shardingItems = shardingService.getLocalShardingItems();
if (isFailover) {
// Drop items another instance has taken over via failover.
shardingItems.removeAll(failoverService.getLocalTakeOffItems());
}
shardingItems.removeAll(executionService.getDisabledItems(shardingItems));
return executionContextService.getJobShardingContext(shardingItems);
}
/**
 * Reshard the job if the resharding flag (/{jobName}/sharding/necessary) is set.
 * Only the leader performs the sharding; non-leader instances block until the
 * leader has finished.
 */
public void shardingIfNecessary() {
List<JobInstance> availableJobInstances = instanceService.getAvailableJobInstances();
if (!isNeedSharding() || availableJobInstances.isEmpty()) {
return;
}
if (!leaderService.isLeaderUntilBlock()) {
// Not the leader: wait until the leader completes sharding, then return.
blockUntilShardingCompleted();
return;
}
waitingOtherJobCompleted();
LiteJobConfiguration liteJobConfig = configService.load(false);
int shardingTotalCount = liteJobConfig.getTypeConfig().getCoreConfig().getShardingTotalCount();
log.debug("Job '{}' sharding begin.", jobName);
/**
Mark sharding as in progress before starting; other instances wait on it:
/{jobName}/sharding/processing
**/
jobNodeStorage.fillEphemeralJobNode(ShardingNode.PROCESSING, "");
// Rebuild the per-item nodes for the configured total count.
resetShardingInfo(shardingTotalCount);
// Resolve the sharding strategy, compute assignments, and persist them in one ZK transaction.
JobShardingStrategy jobShardingStrategy = JobShardingStrategyFactory.getStrategy(liteJobConfig.getJobShardingStrategyClass());
jobNodeStorage.executeInTransaction(new PersistShardingInfoTransactionExecutionCallback(jobShardingStrategy.sharding(availableJobInstances, jobName, shardingTotalCount)));
log.debug("Job '{}' sharding complete.", jobName);
}
/**
 * Reset the sharding item nodes for a (possibly changed) total shard count.
 * For each item 0..shardingTotalCount-1 the stale per-item instance
 * assignment node (ShardingNode.getInstanceNode) is removed and the item
 * node /{jobName}/sharding/{item} is (re)created; any leftover item nodes
 * from a previously larger count are deleted.
 *
 * @param shardingTotalCount the configured number of sharding items
 */
private void resetShardingInfo(final int shardingTotalCount) {
    for (int item = 0; item < shardingTotalCount; item++) {
        // Drop the previous instance assignment for this item.
        jobNodeStorage.removeJobNodeIfExisted(ShardingNode.getInstanceNode(item));
        // Ensure the item node itself exists.
        jobNodeStorage.createJobNodeIfNeeded(ShardingNode.ROOT + "/" + item);
    }
    // Trim item nodes left over from a larger previous shard count.
    int existingCount = jobNodeStorage.getJobNodeChildrenKeys(ShardingNode.ROOT).size();
    for (int item = shardingTotalCount; item < existingCount; item++) {
        jobNodeStorage.removeJobNodeIfExisted(ShardingNode.ROOT + "/" + item);
    }
}
/**
 * Transaction callback that persists the sharding result: writes each
 * item's assigned job instance id and deletes the necessary/processing
 * flags, all inside a single ZK transaction.
 */
@RequiredArgsConstructor
class PersistShardingInfoTransactionExecutionCallback implements TransactionExecutionCallback {
// Sharding result: job instance -> the items assigned to it.
private final Map<JobInstance, List<Integer>> shardingResults;
@Override
public void execute(final CuratorTransactionFinal curatorTransactionFinal) throws Exception {
for (Map.Entry<JobInstance, List<Integer>> entry : shardingResults.entrySet()) {
for (int shardingItem : entry.getValue()) {
/**
One node per sharding item holding the owning instance id
(path from ShardingNode.getInstanceNode, under /{jobName}/sharding/).
NOTE(review): getBytes() uses the platform default charset — consider UTF-8.
**/
curatorTransactionFinal.create().forPath(jobNodePath.getFullPath(ShardingNode.getInstanceNode(shardingItem)), entry.getKey().getJobInstanceId().getBytes()).and();
}
}
/**
Clear the sharding flags in the same transaction:
/{jobName}/sharding/necessary
/{jobName}/sharding/processing
**/
curatorTransactionFinal.delete().forPath(jobNodePath.getFullPath(ShardingNode.NECESSARY)).and();
curatorTransactionFinal.delete().forPath(jobNodePath.getFullPath(ShardingNode.PROCESSING)).and();
}
}
在獲取分片上下文后,根據(jù)每個分片項判斷有無作業(yè)是運行中的狀態(tài),如果有,則標記為misfire
jobFacade.misfireIfRunning(shardingContexts.getShardingItemParameters().keySet())
/**
 * Flag the given sharding items as misfired when any of them is still running.
 *
 * @param shardingItems sharding items of the current execution
 * @return whether this execution was marked as misfired
 */
public boolean misfireIfRunning(final Collection<Integer> shardingItems) {
    boolean misfired = executionService.misfireIfHasRunningItems(shardingItems);
    return misfired;
}
/**
 * Set the misfire flag when any of the given sharding items is still running.
 *
 * @param items sharding items to check and, if needed, flag
 * @return whether this execution was marked as misfired
 */
public boolean misfireIfHasRunningItems(final Collection<Integer> items) {
    if (hasRunningItems(items)) {
        setMisfire(items);
        return true;
    }
    return false;
}
/**
 * Create a misfire flag node for every given sharding item
 * (path from ShardingNode.getMisfireNode, i.e. /{jobName}/sharding/{item}/misfire).
 *
 * @param items sharding items to flag as misfired
 */
public void setMisfire(final Collection<Integer> items) {
    for (final int item : items) {
        jobNodeStorage.createJobNodeIfNeeded(ShardingNode.getMisfireNode(item));
    }
}
misfire判斷結束之后,會去執(zhí)行job。執(zhí)行開始時,會將作業(yè)狀態(tài)改為running狀態(tài);作業(yè)執(zhí)行完成后,將running節(jié)點刪除。
/**
 * Execute the job for the given sharding contexts.
 * Registers the running markers before execution and removes them in the
 * finally block; posts job status trace events when event sending is allowed.
 *
 * @param shardingContexts sharding contexts of this execution
 * @param executionSource  what triggered this execution
 */
private void execute(final ShardingContexts shardingContexts, final JobExecutionEvent.ExecutionSource executionSource) {
if (shardingContexts.getShardingItemParameters().isEmpty()) {
// Nothing assigned to this instance: report TASK_FINISHED and bail out.
if (shardingContexts.isAllowSendJobEvent()) {
jobFacade.postJobStatusTraceEvent(shardingContexts.getTaskId(), State.TASK_FINISHED, String.format("Sharding item for job '%s' is empty.", jobName));
}
return;
}
/**
Creates the running marker for each local item:
/{jobName}/sharding/{item}/running
**/
jobFacade.registerJobBegin(shardingContexts);
String taskId = shardingContexts.getTaskId();
if (shardingContexts.isAllowSendJobEvent()) {
jobFacade.postJobStatusTraceEvent(taskId, State.TASK_RUNNING, "");
}
try {
// Failover handling happens inside process().
process(shardingContexts, executionSource);
} finally {
// TODO Consider adding a failed-job state and how to close the loop on overall job failure.
/**
Removes the running marker for each local item:
/{jobName}/sharding/{item}/running
**/
jobFacade.registerJobCompleted(shardingContexts);
if (itemErrorMessages.isEmpty()) {
if (shardingContexts.isAllowSendJobEvent()) {
jobFacade.postJobStatusTraceEvent(taskId, State.TASK_FINISHED, "");
}
} else {
// At least one item failed: report TASK_ERROR with the collected messages.
if (shardingContexts.isAllowSendJobEvent()) {
jobFacade.postJobStatusTraceEvent(taskId, State.TASK_ERROR, itemErrorMessages.toString());
}
}
}
}
/**
 * Register that job execution has started.
 * When monitorExecution is enabled, an ephemeral running marker
 * (ShardingNode.getRunningNode) is created for every local sharding item.
 *
 * @param shardingContexts sharding contexts of this execution
 */
public void registerJobBegin(final ShardingContexts shardingContexts) {
    JobRegistry.getInstance().setJobRunning(jobName, true);
    if (!configService.load(true).isMonitorExecution()) {
        // Execution monitoring disabled: skip the per-item running markers.
        return;
    }
    for (final int item : shardingContexts.getShardingItemParameters().keySet()) {
        jobNodeStorage.fillEphemeralJobNode(ShardingNode.getRunningNode(item), "");
    }
}
/**
 * Register that job execution has completed.
 * When monitorExecution is enabled, the running marker
 * (ShardingNode.getRunningNode) of every local sharding item is removed.
 *
 * @param shardingContexts sharding contexts of this execution
 */
public void registerJobCompleted(final ShardingContexts shardingContexts) {
    JobRegistry.getInstance().setJobRunning(jobName, false);
    if (!configService.load(true).isMonitorExecution()) {
        // Execution monitoring disabled: no running markers were created.
        return;
    }
    for (final int item : shardingContexts.getShardingItemParameters().keySet()) {
        jobNodeStorage.removeJobNodeIfExisted(ShardingNode.getRunningNode(item));
    }
}