/**
 * Initialize a new instance that handles commands from the supplied queue.
 *
 * @param clientOptions client options for this connection, must not be {@literal null}
 * @param clientResources client resources for this connection, must not be {@literal null}.
 */
public PubSubEndpoint(ClientOptions clientOptions, ClientResources clientResources) {
    super(clientOptions, clientResources);
    // Concurrent sets: subscription state is presumably touched from both I/O and
    // user threads — NOTE(review): confirm against the callers of channels/patterns.
    this.channels = ConcurrentHashMap.newKeySet();
    this.patterns = ConcurrentHashMap.newKeySet();
}
/**
 * Creates tracking state for a single server.
 *
 * @param serverName identity of the server this node represents
 */
public ServerStateNode(ServerName serverName) {
    this.serverName = serverName;
    // Concurrent set — regions are presumably added/removed from multiple
    // threads; confirm against the code that mutates this node.
    this.regions = ConcurrentHashMap.newKeySet();
}
/**
 * WebSocket endpoint that tracks open sessions and can broadcast a text
 * message to every connected client.
 *
 * <p>Thread-safety: {@code SESSIONS} is a {@link ConcurrentHashMap}-backed
 * set, so registration, removal, and (weakly consistent) iteration are all
 * safe without any external locking.
 */
@ServerEndpoint("/push")
public class Push {

    /** All currently open sessions; concurrent set, safe to iterate while mutated. */
    private static final Set<Session> SESSIONS = ConcurrentHashMap.newKeySet();

    @OnOpen
    public void onOpen(Session session) {
        SESSIONS.add(session);
    }

    @OnClose
    public void onClose(Session session) {
        SESSIONS.remove(session);
    }

    /**
     * Sends {@code text} asynchronously to every session that is still open.
     *
     * <p>Fix: removed the previous {@code synchronized (SESSIONS)} block. It
     * provided no consistency guarantee — {@code onOpen}/{@code onClose}
     * mutate the set without ever taking that lock — and it needlessly
     * serialized concurrent broadcasts. Iterating a concurrent set while it
     * is modified is explicitly safe (weakly consistent iteration).
     *
     * @param text message payload to broadcast
     */
    public static void sendAll(String text) {
        for (Session session : SESSIONS) {
            if (session.isOpen()) {
                session.getAsyncRemote().sendText(text);
            }
        }
    }
}
/**
 * Returns a stateful predicate that accepts an element only the first time
 * its extracted key is seen; intended for use with {@code Stream.filter} to
 * de-duplicate a stream by key.
 *
 * @param keyExtractor derives the de-duplication key from an element
 * @param <T> element type
 * @return predicate yielding {@code true} exactly once per distinct key
 */
private static <T> Predicate<T> distinctByKey(Function<? super T, ?> keyExtractor) {
    // Concurrent set so the predicate also behaves in parallel streams.
    final Set<Object> seenKeys = ConcurrentHashMap.newKeySet();
    return element -> seenKeys.add(keyExtractor.apply(element));
}
}
/**
 * Creates a coarse-granularity view over the session's attribute map.
 *
 * @param attributes backing attribute map for this session
 * @param mutator write-back strategy for the cache entry
 * @param marshallability used to validate that attribute values can be marshalled
 * @param properties cache properties; decides whether mutation tracking is needed
 */
public CoarseSessionAttributes(Map<String, Object> attributes, Mutator mutator, Marshallability marshallability, CacheProperties properties) {
    super(attributes);
    this.attributes = attributes;
    this.mutator = mutator;
    this.marshallability = marshallability;
    this.properties = properties;
    // Only track mutated keys outside a transaction; in the transactional
    // case the cache records writes itself, so no set is needed.
    if (properties.isTransactional()) {
        this.mutations = null;
    } else {
        this.mutations = ConcurrentHashMap.newKeySet();
    }
}
/**
 * Snapshots the connections held by the connection provider.
 *
 * @return all connections that are connected.
 * @deprecated iterate via the connection provider instead.
 */
@Deprecated
protected Collection<StatefulRedisConnection<K, V>> allConnections() {
    Set<StatefulRedisConnection<K, V>> connections = ConcurrentHashMap.newKeySet();
    connectionProvider.forEach(connection -> connections.add(connection));
    return connections;
}
/**
 * Creates a collection view over a snapshot of the given cache objects.
 *
 * @param cachedObjects cache entries to expose; defensively copied so later
 *        changes to the source collection are not reflected here
 */
private CacheObjectCollection(Collection<DefaultCache.CacheObject<V>> cachedObjects) {
    this.cachedObjects = new ArrayList<>(cachedObjects);
}
/**
 * Creates a trigger that tracks which open graph instances still need to
 * acknowledge a type-cache eviction.
 *
 * @param evictionId identifier of the eviction being tracked
 * @param updatedTypeTriggers callbacks associated with the updated types
 * @param graph graph whose open instances must acknowledge the eviction
 */
private EvictionTrigger(long evictionId, List<Callable<Boolean>> updatedTypeTriggers, StandardJanusGraph graph) {
    this.graph = graph;
    this.evictionId = evictionId;
    this.updatedTypeTriggers = updatedTypeTriggers;
    this.instancesToBeAcknowledged = ConcurrentHashMap.newKeySet();
    // A management transaction is opened only to read the set of currently
    // open instances. Fix: roll it back in a finally block so the transaction
    // is not leaked if reading the open-instance set throws.
    final JanusGraphManagement mgmt = graph.openManagement();
    try {
        ((ManagementSystem) mgmt).getOpenInstancesInternal().forEach(instancesToBeAcknowledged::add);
    } finally {
        mgmt.rollback();
    }
}
/**
 * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of ram
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param fileInfo The store file information.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column
 *          family configuration. This may or may not be the same as the Bloom filter type
 *          actually present in the HFile, because column family configuration might change. If
 *          this is {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param primaryReplica true if this is a store file for primary replica, otherwise false.
 */
public HStoreFile(FileSystem fs, StoreFileInfo fileInfo, Configuration conf, CacheConfig cacheConf, BloomType cfBloomType, boolean primaryReplica) {
    // Stream readers are opened/closed over this file's lifetime; a concurrent
    // set lets them be tracked without extra locking.
    this.streamReaders = ConcurrentHashMap.newKeySet();
    this.fs = fs;
    this.fileInfo = fileInfo;
    this.cacheConf = cacheConf;
    this.noReadahead = conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD);
    // Honor the column-family Bloom type only when Bloom filters are enabled
    // globally; otherwise force NONE and log that the CF setting was ignored.
    if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
        this.cfBloomType = cfBloomType;
    } else {
        LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " + "cfBloomType=" + cfBloomType + " (disabled in config)");
        this.cfBloomType = BloomType.NONE;
    }
    this.primaryReplica = primaryReplica;
}
// Select the set implementation under test by numeric id.
switch (impl) {
    case 1:
        // Implementation 1: concurrent set view backed by ConcurrentHashMap.
        AS = ConcurrentHashMap.newKeySet();
        break;
    case 2:
// Fix: newKeySet() is a static factory on ConcurrentHashMap; the previous
// code invoked it through freshly constructed instances, allocating four
// throwaway maps (whose type arguments were irrelevant to the result) and
// obscuring the static call. Invoke the factory directly instead.
// Concurrent sets chosen presumably because these are read/written from
// multiple threads — NOTE(review): confirm against the accessors.
private static Set<String> customExcludedClasses = ConcurrentHashMap.newKeySet();
private static Set<String> systemPrefixes = ConcurrentHashMap.newKeySet();
private static Set<String> nonSystemTypes = ConcurrentHashMap.newKeySet();
private static Set<String> nonSystemTypePackages = ConcurrentHashMap.newKeySet();
readerThread.start();
// Launch <concurrency> worker threads; the names they observe are collected
// into a shared concurrent set so parallel adds are safe.
Thread[] t = new Thread[concurrency];
final Set<String> names = ConcurrentHashMap.newKeySet();
for (int i = 0; i < concurrency; i++) {
    t[i] = new Thread() {
@Test(timeout=180000)
public void testBulkOps() throws Exception {
    // Concurrent set: presumably populated by the tasks behind `futures` in
    // parallel — TODO confirm; the remainder of the method is outside this view.
    Set<Long> universalSet = ConcurrentHashMap.newKeySet();
    List<Future<?>> futures = new ArrayList<>();
@Test(timeout=180000)
public void testCRUD() throws Exception {
    // Concurrent set: presumably populated by the tasks behind `futures` in
    // parallel — TODO confirm; the remainder of the method is outside this view.
    Set<Long> universalSet = ConcurrentHashMap.newKeySet();
    List<Future<?>> futures = new ArrayList<>();
@Test
public void testOffloadConflict() throws Exception {
    // Concurrent sets: presumably recorded into from offloader callbacks on
    // background threads — NOTE(review): confirm against MockLedgerOffloader.
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();
    // Mark the first ledger's id as one whose offload should error.
    Set<Long> errorSet = ConcurrentHashMap.newKeySet();
    errorSet.add(ledger.getLedgersInfoAsList().get(0).getLedgerId());
    errorLedgers.complete(errorSet);
@Ignore("This is currently unstable as if the clear does not complete before the failover," + "there is no future operation that will trigger the code in ClusterTierActiveEntity.invokeServerStoreOperation" + "dealing with in-flight invalidation reconstructed from reconnect data")
@Test(timeout=180000)
public void testClear() throws Exception {
    List<Future<?>> futures = new ArrayList<>();
    // Every key written by any worker, gathered concurrently.
    Set<Long> universalSet = ConcurrentHashMap.newKeySet();
    // Fill each cache from NUM_OF_THREADS parallel putAll jobs of JOB_SIZE
    // random keys, remembering the keys that were written.
    caches.forEach(cache -> {
        for (int i = 0; i < NUM_OF_THREADS; i++) {
            Map<Long, BlobValue> map = random.longs().limit(JOB_SIZE).collect(HashMap::new, (hashMap, x) -> hashMap.put(x, new BlobValue()), HashMap::putAll);
            futures.add(executorService.submit(() -> {
                cache.putAll(map);
                universalSet.addAll(map.keySet());
            }));
        }
    });
    drainTasks(futures);
    // Touch every key on both caches before clearing — presumably to pull the
    // entries into local tiers; confirm intent against the cache setup.
    universalSet.forEach(x -> {
        CACHE1.get(x);
        CACHE2.get(x);
    });
    // Kick off the clear, then kill the active server while it may still be
    // in flight; the clear must survive the failover.
    Future<?> clearFuture = executorService.submit(() -> CACHE1.clear());
    CLUSTER.getClusterControl().terminateActive();
    clearFuture.get();
    // After failover the clear must be observed by the other client.
    universalSet.forEach(x -> assertThat(CACHE2.get(x), nullValue()));
}
/**
 * Verifies that once a mark-delete advances past an offloaded ledger, the
 * ledger is trimmed from the list and its offloaded copy is deleted.
 *
 * <p>Fix: removed the unused locals {@code deleted}, {@code errorLedgers} and
 * {@code failedOffloads} — nothing in this test read or populated them.
 */
@Test
public void testOffloadDelete() throws Exception {
    MockLedgerOffloader offloader = new MockLedgerOffloader();
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    // Small ledgers with immediate rollover and zero retention so trimming
    // happens as soon as the cursor moves on.
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl)factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");
    // 15 entries at 10-per-ledger => two ledgers.
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    // Offload everything up to the last confirmed entry; only the first,
    // sealed ledger completes offload.
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(ledger.getLedgersInfoAsList().stream()
        .filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    long firstLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    long secondLedger = ledger.getLedgersInfoAsList().get(1).getLedgerId();
    // Consuming past the first ledger should trim it and delete its offload.
    cursor.markDelete(ledger.getLastConfirmedEntry());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 1);
    Assert.assertEquals(ledger.getLedgersInfoAsList().get(0).getLedgerId(), secondLedger);
    assertEventuallyTrue(() -> offloader.deletedOffloads().contains(firstLedger));
}
// Fix: newKeySet() is a static factory; the previous code invoked it through
// a throwaway ConcurrentHashMap instance, allocating a map that was
// immediately discarded. Call the factory directly.
tasksQueued = ConcurrentHashMap.newKeySet();
/**
 * Resets this component's network-change tracking state: clears the cached
 * interface addresses, drops all registered listeners, and cancels any
 * in-flight interface poll.
 */
protected void deactivate() {
    lastKnownInterfaceAddresses = Collections.emptyList();
    // Replace (rather than clear) the listener set so a new concurrent set is
    // ready if the component is activated again.
    networkAddressChangeListeners = ConcurrentHashMap.newKeySet();
    // Cancel the poll task last; statement order kept as-is since the poll
    // task may race with the resets above — NOTE(review): confirm ordering
    // intent before rearranging.
    if (networkInterfacePollFuture != null) {
        networkInterfacePollFuture.cancel(true);
        networkInterfacePollFuture = null;
    }
}