public void updateFailed(Throwable exception) { // We depend on pending calls to request another metadata update this.state = State.QUIESCENT; if (exception instanceof AuthenticationException) { log.warn("Metadata update failed due to authentication error", exception); this.authException = (AuthenticationException) exception; } else { log.info("Metadata update failed", exception); } }
/**
 * Stops logging clients. This is a blocking call.
 *
 * <p>Initiates an orderly shutdown, waits for in-flight tasks to finish, and
 * only forces a hard shutdown if the wait times out or the caller is
 * interrupted. The original code checked {@code isTerminated()} immediately
 * after {@code shutdown()}, which almost never returns true right away, so it
 * effectively always aborted running tasks with {@code shutdownNow()}.
 */
public void stop() {
    service.shutdown(); // stop accepting new tasks; let queued ones drain
    try {
        // Give running tasks a chance to complete before forcing shutdown.
        if (!service.awaitTermination(1000, TimeUnit.SECONDS)) {
            service.shutdownNow();
        }
    } catch (InterruptedException e) {
        LOGGER.error("exception awaiting termination", e);
        service.shutdownNow();
        // Preserve the interrupt status for callers further up the stack.
        Thread.currentThread().interrupt();
    }
    LOGGER.info("Logging clients stopped");
}
protected void logException() { if (exception instanceof JobNotFoundException || exception instanceof ActivitiTaskAlreadyClaimedException) { // reduce log level, because this may have been caused because of job deletion due to cancelActiviti="true" log.info("Error while closing command context", exception); } else if (exception instanceof ActivitiOptimisticLockingException) { // reduce log level, as normally we're not interested in logging this exception log.debug("Optimistic locking exception : " + exception); } else { log.error("Error while closing command context", exception); } }
/**
 * Logs each key alongside the argument of its corresponding combiner updater.
 * Assumes {@code keys} and {@code updaters} are index-aligned and that every
 * updater is a {@link CombinerValueUpdater}.
 */
private void print(List<List<Object>> keys, List<ValueUpdater> updaters) {
    int idx = 0;
    for (List<Object> key : keys) {
        CombinerValueUpdater updater = (CombinerValueUpdater) updaters.get(idx);
        LOG.info("updateCount = {}, keys = {} => updaterArgs = {}", updateCount, key, updater.getArg());
        idx++;
    }
}
private static IResultsConsumer[] getConsumersUnsafe(IResultsConsumer... additional) throws IOException { List<IResultsConsumer> consumers = new ArrayList<IResultsConsumer>(); consumers.add(new XMLConsumer(new File("jub." + Math.abs(System.nanoTime()) + ".xml"))); consumers.add(new WriterConsumer()); // defaults to System.out consumers.add(new CsvConsumer("target/jub.csv")); if (null != System.getenv(ENV_EFFORT_GENERATE)) { String file = getEffortFilePath(); Writer writer = new FileWriter(file, true); log.info("Opened " + file + " for appending"); consumers.add(new TimeScaleConsumer(writer)); } for (IResultsConsumer c : additional) { consumers.add(c); } return consumers.toArray(new IResultsConsumer[consumers.size()]); }
@Override public void handleMessage(ReleaseMessage message, String channel) { logger.info("message received - channel: {}, message: {}", channel, message); String releaseMessage = message.getMessage(); if (!Topics.APOLLO_RELEASE_TOPIC.equals(channel) || Strings.isNullOrEmpty(releaseMessage)) { return; } List<String> keys = STRING_SPLITTER.splitToList(releaseMessage); //message should be appId+cluster+namespace if (keys.size() != 3) { logger.error("message format invalid - {}", releaseMessage); return; } String appId = keys.get(0); String cluster = keys.get(1); String namespace = keys.get(2); List<GrayReleaseRule> rules = grayReleaseRuleRepository .findByAppIdAndClusterNameAndNamespaceName(appId, cluster, namespace); mergeGrayReleaseRules(rules); }
/** Output this entire chain of rides. */
public void dumpRideChain() {
    // Walk backwards via 'previous', prepending so the list ends up in order.
    List<Ride> rides = Lists.newLinkedList();
    for (Ride r = this; r != null; r = r.previous) {
        rides.add(0, r);
    }

    Ride first = rides.get(0);
    Ride last = rides.get(rides.size() - 1);
    LOG.info("Path from {} to {}", first.from, last.to);
    for (Ride r : rides) {
        LOG.info(" {}", r.toString());
    }
}
/**
 * Computes the partition ids this task instance owns.
 *
 * <p>Partitions are distributed round-robin: task {@code taskIndex} owns every
 * {@code totalTasks}-th partition starting at its own index.
 *
 * @return the owned partition ids, rendered as strings
 */
protected List<String> calculateParititionIdsToOwn() {
    List<String> taskPartitions = new ArrayList<String>();
    for (int i = this.taskIndex; i < config.getPartitionCount(); i += this.totalTasks) {
        taskPartitions.add(Integer.toString(i));
        // Parameterized logging avoids the eager String.format on every iteration.
        logger.info("taskIndex {} owns partitionId {}.", this.taskIndex, i);
    }
    return taskPartitions;
}
}
/**
 * Bridges Kryo's minlog output into this class's logger, mapping each minlog
 * level to the corresponding logger method. Unknown levels are dropped.
 */
@Override
public void log(int level, String category, String message, Throwable ex) {
    final String line = "[KRYO " + category + "] " + message;
    switch (level) {
        case Log.LEVEL_ERROR:
            log.error(line, ex);
            break;
        case Log.LEVEL_WARN:
            log.warn(line, ex);
            break;
        case Log.LEVEL_INFO:
            log.info(line, ex);
            break;
        case Log.LEVEL_DEBUG:
            log.debug(line, ex);
            break;
        case Log.LEVEL_TRACE:
            log.trace(line, ex);
            break;
        default:
            // Unrecognized level: nothing to do (matches original fall-through).
            break;
    }
}
}
/**
 * Shuts this server down: closes the listening socket, then closes every
 * connected socket node. Safe to call even if the socket was never opened.
 */
public void close() {
    closed = true;

    ServerSocket ss = serverSocket;
    if (ss != null) {
        serverSocket = null;
        try {
            ss.close();
        } catch (IOException e) {
            logger.error("Failed to close serverSocket", e);
        }
    }

    logger.info("closing this server");
    synchronized (socketNodeList) {
        for (SocketNode node : socketNodeList) {
            node.close();
        }
    }
    if (!socketNodeList.isEmpty()) {
        logger.warn("Was expecting a 0-sized socketNodeList after server shutdown");
    }
}
private void notifyStart(File instanceDir, String destination, File[] instanceConfigs) { try { defaultAction.start(destination); actions.put(destination, defaultAction); // 启动成功后记录配置文件信息 InstanceConfigFiles lastFile = lastFiles.get(destination); List<FileInfo> newFileInfo = new ArrayList<FileInfo>(); for (File instanceConfig : instanceConfigs) { newFileInfo.add(new FileInfo(instanceConfig.getName(), instanceConfig.lastModified())); } lastFile.setInstanceFiles(newFileInfo); logger.info("auto notify start {} successful.", destination); } catch (Throwable e) { logger.error(String.format("scan add found[%s] but start failed", destination), e); } }
/**
 * Instantiates and appends each interceptor class to {@code l}, skipping classes
 * that are excluded by the framework or already among the defaults.
 * Instantiation failures are logged and skipped.
 */
public static void interceptorsForHandler(AtmosphereFramework framework,
                                          List<Class<? extends AtmosphereInterceptor>> interceptors,
                                          List<AtmosphereInterceptor> l) {
    for (Class<? extends AtmosphereInterceptor> clazz : interceptors) {
        if (framework.excludedInterceptors().contains(clazz.getName())
                || AtmosphereFramework.DEFAULT_ATMOSPHERE_INTERCEPTORS.contains(clazz)) {
            continue;
        }
        try {
            logger.info("Adding {}", clazz);
            l.add(framework.newClassInstance(AtmosphereInterceptor.class, clazz));
        } catch (Throwable e) {
            logger.warn("", e);
        }
    }
}
/**
 * Initializes (and, if this is a restore, repopulates) the reader's operator
 * state. Must only run once per operator instance.
 */
@Override
public void initializeState(StateInitializationContext context) throws Exception {
    super.initializeState(context);

    // Guard against double initialization of the operator state.
    checkState(checkpointedState == null, "The reader state has already been initialized.");

    checkpointedState = context.getOperatorStateStore().getSerializableListState("splits");

    int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask();
    if (context.isRestored()) {
        LOG.info("Restoring state for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx);

        // this may not be null in case we migrate from a previous Flink version.
        // (i.e. restoredReaderState could already be populated by a legacy
        // restore path, in which case we must not overwrite it.)
        if (restoredReaderState == null) {
            restoredReaderState = new ArrayList<>();
            // Copy the checkpointed splits into the in-memory restore list.
            for (TimestampedFileInputSplit split : checkpointedState.get()) {
                restoredReaderState.add(split);
            }

            if (LOG.isDebugEnabled()) {
                LOG.debug("{} (taskIdx={}) restored {}.", getClass().getSimpleName(), subtaskIdx, restoredReaderState);
            }
        }
    } else {
        LOG.info("No state to restore for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx);
    }
}
/**
 * Logs an exception: full stack trace at DEBUG when enabled, otherwise just the
 * message at INFO to keep the log quiet.
 */
static void logException(String msg, Exception e) {
    if (!LOG.isDebugEnabled()) {
        LOG.info(msg + ": " + e.getMessage());
    } else {
        LOG.debug(msg, e);
    }
}
/**
 * Fetches the table descriptors for the given tables.
 *
 * @param tableNames tables to look up
 * @return the matching descriptors, or an empty array if the lookup fails
 */
TableDescriptor[] getTableDescriptors(List<TableName> tableNames) {
    LOG.info("getTableDescriptors == tableNames => " + tableNames);
    try (Connection conn = ConnectionFactory.createConnection(getConf());
         Admin admin = conn.getAdmin()) {
        List<TableDescriptor> tds = admin.listTableDescriptors(tableNames);
        return tds.toArray(new TableDescriptor[tds.size()]);
    } catch (IOException e) {
        // Surface the failure: logging at DEBUG silently hid lookup errors while
        // the caller received an empty result.
        LOG.warn("Exception getting table descriptors", e);
    }
    return new TableDescriptor[0];
}
/**
 * Closes the current output file and runs each configured rotation action on
 * it, timing the whole rotation. Closing and acting happen under the write lock
 * so no writes interleave with the rotation.
 *
 * @param writer the writer whose file is being rotated
 * @throws IOException if closing the writer or an action fails
 */
protected void rotateOutputFile(Writer writer) throws IOException {
    LOG.info("Rotating output file...");
    final long start = System.currentTimeMillis();

    synchronized (this.writeLock) {
        writer.close();

        LOG.info("Performing {} file rotation actions.", this.rotationActions.size());
        for (RotationAction action : this.rotationActions) {
            action.execute(this.fs, writer.getFilePath());
        }
    }

    LOG.info("File rotation took {} ms.", System.currentTimeMillis() - start);
}
/**
 * Decides whether the I/O thread should exit: yes when all work is done, yes
 * (forced) once the hard-shutdown deadline has passed, otherwise no.
 *
 * @param now current time in ms
 * @param curHardShutdownTimeMs deadline after which shutdown is forced
 * @return true if the thread should exit now
 */
private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
    if (hasActiveExternalCalls()) {
        if (now >= curHardShutdownTimeMs) {
            log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
            return true;
        }
        log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now);
        return false;
    }
    log.trace("All work has been completed, and the I/O thread is now exiting.");
    return true;
}
/**
 * Rebalances this member's cache, logging the detailed results only at debug
 * level. Any failure is logged as a warning and swallowed.
 */
@Override
protected void rebalanceCache() {
    try {
        getLogger().info("Rebalancing: " + this.cache);
        RebalanceResults results = RegionHelper.rebalanceCache(this.cache);
        if (!getLogger().isDebugEnabled()) {
            return;
        }
        getLogger().debug("Done rebalancing: " + this.cache);
        getLogger().debug(RegionHelper.getRebalanceResultsMessage(results));
    } catch (Exception e) {
        getLogger().warn("Rebalance failed because of the following exception:", e);
    }
}