/**
 * Merges the supplied per-task load samples into the locally tracked load map.
 * Existing entries for the same task ids are overwritten.
 */
@Override
public void sendLoadMetrics(Map<Integer, Double> taskToLoad) {
    _load.putAll(taskToLoad);
}
/**
 * Copies every mapping from {@code m} into this map. The whole bulk insert
 * happens while holding this instance's monitor, so it is atomic with respect
 * to the other synchronized operations on this map.
 */
@Override
public synchronized void putAll(Map<? extends K, ? extends V> m) {
    super.putAll(m);
}
/**
 * Delegates the bulk insert straight to the backing map; no additional
 * bookkeeping is performed here.
 */
public void putAll(final Map<? extends K, ? extends V> m) {
    backingMap.putAll(m);
}
/**
 * Creates a cache pre-populated with the mappings of the given map.
 * Delegates to the no-arg constructor for initialization, then copies
 * every entry of {@code m} into the internal map.
 *
 * @param m the initial mappings to seed the cache with
 */
public UnlimitedConcurrentCache(Map<? extends K, ? extends V> m) {
    this();
    map.putAll(m);
}
/**
 * Bulk-inserts all given entries; this is a plain pass-through to the
 * underlying map field.
 */
@Override
public void putAll(final Map<BytesKey, OpItem> map) {
    this.map.putAll(map);
}
/**
 * Copies all mappings from {@code m} into the wrapped map; no extra
 * processing is applied.
 */
@Override
public void putAll(Map<? extends K, ? extends V> m) {
    map.putAll(m);
}
/**
 * Restores the per-account statistics saved in the given instance-state
 * bundle, merging them into {@code accountStats}. A {@code null} bundle or a
 * missing entry is silently ignored.
 */
@SuppressWarnings("unchecked")
private void restoreAccountStats(Bundle icicle) {
    // No saved state -> nothing to restore.
    if (icicle == null) {
        return;
    }
    // Unchecked cast: the bundle value was stored by us as this exact map type.
    Map<String, AccountStats> savedStats = (Map<String, AccountStats>) icicle.get(ACCOUNT_STATS);
    if (savedStats != null) {
        accountStats.putAll(savedStats);
    }
}
/**
 * Copy constructor: duplicates the registered mapper factories and the
 * already-resolved mapper cache from another instance.
 */
private ColumnMappers(ColumnMappers source) {
    factories.addAll(source.factories);
    cache.putAll(source.cache);
}
/**
 * Copy constructor: duplicates the registered row-mapper factories and the
 * already-resolved mapper cache from another instance.
 */
private RowMappers(RowMappers source) {
    factories.addAll(source.factories);
    cache.putAll(source.cache);
}
/**
 * Creates a new map with the same mappings as the given map.
 * The map is created with a capacity of 1.5 times the number
 * of mappings in the given map or 16 (whichever is greater),
 * and a default load factor (0.75) and concurrencyLevel (16).
 * The capacity term {@code (int) (m.size() / DEFAULT_LOAD_FACTOR) + 1}
 * sizes the table so the initial bulk insert does not trigger a resize.
 *
 * @param m the map whose mappings are copied into the new map
 */
public ConcurrentHashMap(Map<? extends K, ? extends V> m) { this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); putAll(m); }
/**
 * Inserts all mappings from {@code m} and records a creation timestamp for
 * each inserted key in {@code created}.
 *
 * <p>Improvements over the previous version: the timestamp is sampled once
 * and shared by the whole batch (previously {@code System.currentTimeMillis()}
 * was invoked per key, so entries added in a single call could carry slightly
 * different creation times), and the pointless
 * {@code keySet().stream().forEach(...)} is replaced by a plain loop.
 */
@Override
public void putAll(Map<? extends K, ? extends V> m) {
    // One consistent timestamp for every entry in this batch.
    final long now = System.currentTimeMillis();
    for (K key : m.keySet()) {
        created.put(key, now);
    }
    super.putAll(m);
}
/**
 * Refreshes the task/component views from ZooKeeper: merges the latest
 * task-id -> component-name assignments into {@code tasksToComponent},
 * rebuilds the reverse component -> tasks mapping, and re-sorts every
 * component's task list.
 *
 * @throws Exception if the assignment info cannot be read from ZK
 */
private void updateTaskComponentMap() throws Exception {
    Map<Integer, String> taskToComponent =
            Common.getTaskToComponent(Cluster.get_all_taskInfo(zkCluster, topologyId));

    this.tasksToComponent.putAll(taskToComponent);
    LOG.info("Updated tasksToComponentMap:" + tasksToComponent);

    this.componentToSortedTasks.putAll(JStormUtils.reverse_map(taskToComponent));
    // Keep each component's task list sorted, as the field name promises.
    for (List<Integer> tasks : componentToSortedTasks.values()) {
        Collections.sort(tasks);
    }
}
@Override public void putAll(Map<? extends K, ? extends V> m) { super.putAll(m); // drop the transient sets; will be rebuilt when/if needed clearCache(); }
/**
 * Copies all given name/value pairs into the backing value-object property
 * map; a straight delegation with no extra processing.
 */
@Override
public void putAll(Map<? extends String, ? extends Object> m) {
    voProperties.putAll(m);
}
/**
 * Creates a {@link TaskGroup} for the given id/offsets, seeds it with a fresh
 * {@code TaskData} per task id, and appends it to the pending-completion list
 * for that group id (creating the list on first use).
 */
@VisibleForTesting
public void addTaskGroupToPendingCompletionTaskGroup(
    int taskGroupId,
    ImmutableMap<PartitionIdType, SequenceOffsetType> partitionOffsets,
    Optional<DateTime> minMsgTime,
    Optional<DateTime> maxMsgTime,
    Set<String> tasks,
    Set<PartitionIdType> exclusiveStartingSequencePartitions
)
{
  TaskGroup group = new TaskGroup(
      taskGroupId,
      partitionOffsets,
      minMsgTime,
      maxMsgTime,
      exclusiveStartingSequencePartitions
  );
  // Seed fresh per-task state; task ids come from a Set, so no duplicates.
  for (String taskId : tasks) {
    group.tasks.put(taskId, new TaskData());
  }
  pendingCompletionTaskGroups
      .computeIfAbsent(taskGroupId, id -> new CopyOnWriteArrayList<>())
      .add(group);
}
/**
 * Copies all mappings from {@code map} into this cache. When a removal
 * listener or a cache writer is configured, entries are inserted one at a
 * time through {@link #put} so each insertion fires the per-entry callbacks;
 * otherwise the entries are bulk-inserted directly into the backing store.
 */
@Override
public void putAll(Map<? extends K, ? extends V> map) {
    boolean needsPerEntryCallbacks =
        hasRemovalListener() || (writer != CacheWriter.disabledWriter());
    if (needsPerEntryCallbacks) {
        map.forEach(this::put);
    } else {
        // Fast path: no listeners/writers to notify, bulk insert is safe.
        data.putAll(map);
    }
}
/**
 * Loads the currently active executions from the database and merges them
 * into the in-memory running-executions map.
 *
 * @throws ExecutorManagerException if the active flows cannot be fetched
 */
private void loadRunningExecutions() throws ExecutorManagerException {
    logger.info("Loading running flows from database..");
    final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> active =
        this.executorLoader.fetchActiveFlows();
    logger.info("Loaded " + active.size() + " running flows");
    this.runningExecutions.get().putAll(active);
}
/**
 * Creates a {@link TaskGroup} for the given id/offsets, seeds it with a fresh
 * {@code TaskData} per task id, and registers it as the actively reading
 * group for that id.
 *
 * <p>Fix: the failure message previously read "taskGroup with RandomIdUtils
 * [%s]" — a leftover from a mechanical refactor; the formatted value is the
 * task group id, so the message now names {@code taskGroupId}. The stream
 * collect is also replaced with a plain seeding loop.
 *
 * @throws ISE if a group with the same id is already actively reading
 */
@VisibleForTesting
public void addTaskGroupToActivelyReadingTaskGroup(
    int taskGroupId,
    ImmutableMap<PartitionIdType, SequenceOffsetType> partitionOffsets,
    Optional<DateTime> minMsgTime,
    Optional<DateTime> maxMsgTime,
    Set<String> tasks,
    Set<PartitionIdType> exclusiveStartingSequencePartitions
)
{
  TaskGroup group = new TaskGroup(
      taskGroupId,
      partitionOffsets,
      minMsgTime,
      maxMsgTime,
      exclusiveStartingSequencePartitions
  );
  // Seed fresh per-task state; task ids come from a Set, so no duplicates.
  for (String taskId : tasks) {
    group.tasks.put(taskId, new TaskData());
  }
  if (activelyReadingTaskGroups.putIfAbsent(taskGroupId, group) != null) {
    throw new ISE(
        "trying to add taskGroup with taskGroupId [%s] to actively reading task groups, but group already exists.",
        taskGroupId
    );
  }
}
/**
 * Initializes this task's heartbeat state. Attempts to recover any heartbeat
 * info previously left in ZooKeeper for the topology; failures to read from
 * ZK are logged and treated as "no prior state" (best-effort recovery).
 */
public void initTaskHb() {
    this.taskHbs = new TopologyTaskHbInfo(this.topologyId, this.taskId);

    ConcurrentHashMap<Integer, TaskHeartbeat> recovered = new ConcurrentHashMap<>();
    try {
        TopologyTaskHbInfo fromZk = zkCluster.topology_heartbeat(topologyId);
        if (fromZk != null) {
            LOG.info("Found task heartbeat info left in zk for " + topologyId + ": " + fromZk.toString());
            if (fromZk.get_taskHbs() != null) {
                recovered.putAll(fromZk.get_taskHbs());
            }
        }
    } catch (Exception e) {
        // Best effort: start with an empty heartbeat map if ZK is unreachable.
        LOG.warn("Failed to get topology heartbeat from zk", e);
    }

    this.taskHbMap.set(recovered);
    taskHbs.set_taskHbs(recovered);
}