/**
 * Disables the given rules so that check methods such as {@link #check(String)} will
 * no longer apply them.
 *
 * @param ruleIds the ids of the rules to disable - no error will be thrown if the id does not exist
 * @since 2.4
 */
public void disableRules(List<String> ruleIds) {
    // Keep the two bookkeeping sets consistent: a rule id must never be in both.
    enabledRules.removeAll(ruleIds);
    disabledRules.addAll(ruleIds);
}
/**
 * Filters {@code original} against the disabled set; if nothing survives, falls back
 * to filtering {@code supported} instead.
 *
 * @param original  the attribute names (e.g. protocols or cipher suites) requested by configuration
 * @param supported the attribute names the SSL implementation actually supports
 * @return the surviving attribute names, preserving their original order
 * @throws IOException if every candidate is disabled
 */
private String[] filter(String[] original, String[] supported) throws IOException {
    // A LinkedHashSet keeps the caller's preference order (significant for SSL
    // protocol/cipher selection) without CopyOnWriteArraySet's copy-on-every-mutation
    // cost, which is pointless for a method-local scratch set.
    Set<String> filtered = new java.util.LinkedHashSet<>(Arrays.asList(original));
    filtered.removeAll(disabled);
    if (filtered.isEmpty()) {
        // Nothing requested survived; fall back to everything the implementation supports.
        filtered.addAll(Arrays.asList(supported));
        filtered.removeAll(disabled);
    }
    if (filtered.isEmpty()) {
        throw new IOException("No supported SSL attributes enabled. "
                + Arrays.toString(original) + " provided, " + disabled.toString()
                + " disabled, " + Arrays.toString(supported) + " supported, result: "
                + filtered.toString());
    }
    // toArray(new String[0]) is the modern idiom; the JVM sizes the array itself.
    return filtered.toArray(new String[0]);
}
@SuppressWarnings("SuspiciousMethodCalls") @Override protected boolean matchesSafely( final Map<? super K, ? super V> actual, final Description mismatchDescription) { final Set<? super K> missingKeys = new HashSet<>(items.keySet()); missingKeys.removeAll(actual.keySet()); if (!missingKeys.isEmpty()) { mismatchDescription .appendText("did not contain " + missingKeys.size() + " keys ") .appendValue(missingKeys); return false; } // Do not switch to streams, as they can't handle nulls: final Map<Object, Object> differentValues = new HashMap<>(); items.forEach((key, value) -> { final Object actualValue = actual.get(key); if (!Objects.equal(value, actualValue)) { differentValues.put(key, actualValue); } }); if (!differentValues.isEmpty()) { mismatchDescription .appendText("has different values for " + differentValues.size() + " keys ") .appendValue(differentValues); return false; } return true; }
node_res.putAll(n.getResources()); if (!node_res.keySet().equals(resources.keySet())) { StringBuilder ops = new StringBuilder(); Set<String> resource_keys = new HashSet<>(defaults.keySet()); resource_keys.addAll(nod.getResources().keySet()); ops.append("\t[ " + nod.shortString() + ", Resources Set: " + resource_keys + " ]\n"); if (node_res.keySet().containsAll(resources.keySet())) { Set<String> diffset = new HashSet<>(node_res.keySet()); diffset.removeAll(resources.keySet()); throw new RuntimeException( "Found an operation with resources set which are not set in other operations in the group:\n" + } else if (resources.keySet().containsAll(node_res.keySet())) { Set<String> diffset = new HashSet<>(resources.keySet()); diffset.removeAll(node_res.keySet()); throw new RuntimeException( "Found an operation with resources unset which are set in other operations in the group:\n" + Number val = kv.getValue(); Number newval = new Double(val.doubleValue() + resources.get(key).doubleValue()); resources.put(key, newval);
/**
 * Un-registers the given connection from every load-balance client associated with the
 * node; clients that end up with no registered connections are retired entirely.
 */
@Override
public synchronized void unregister(final String connectionId, final NodeIdentifier nodeId) {
    final Set<AsyncLoadBalanceClient> registeredClients = clientMap.get(nodeId);
    if (registeredClients == null) {
        // Nothing was ever registered for this node.
        return;
    }

    final Set<AsyncLoadBalanceClient> retiredClients = new HashSet<>();
    for (final AsyncLoadBalanceClient client : registeredClients) {
        client.unregister(connectionId);
        if (client.getRegisteredConnectionCount() == 0) {
            retiredClients.add(client);
        }
    }

    // Drop retired clients from both the per-node set and the global set.
    registeredClients.removeAll(retiredClients);
    allClients.removeAll(retiredClients);

    if (registeredClients.isEmpty()) {
        clientMap.remove(nodeId);
    }

    logger.debug("Un-registered Connection with ID {} so that it will no longer send data to Node {}; {} clients were removed",
        connectionId, nodeId, retiredClients.size());
}
/**
 * Tracks try/catch handler scope and frame state as labels are visited: entering a
 * protected range activates its handlers, leaving one deactivates them, and any other
 * label joins the simulated state (pushing a Throwable if the label is a handler entry).
 */
@Override
public void visitLabel(Label label) {
    super.visitLabel(label);
    final Collection<Label> startingHandlers = tryCatchStart.get(label);
    if (startingHandlers != null) {
        // This label opens one or more protected ranges: their handlers become live.
        effectiveHandlers.addAll(startingHandlers);
    } else {
        // Only consult the end map when the label does not start a range.
        final Collection<Label> endingHandlers = tryCatchEnd.get(label);
        if (endingHandlers != null) {
            // This label closes protected ranges: their handlers go out of scope.
            effectiveHandlers.removeAll(endingHandlers);
        } else {
            state.join(label);
            if (handlers.contains(label)) {
                // Handler entry point: the JVM pushes the thrown exception here.
                state.push(new InstanceItem(THROWABLE_TYPE));
            }
        }
    }
    visitedLabels.add(label);
}
public static boolean hasValidParameters(HttpServletRequest request, HttpServletResponse response) throws IOException { EndPoint endPoint = endPoint(request); Set<String> validParamNames = VALID_ENDPOINT_PARAM_NAMES.get(endPoint); Set<String> userParams = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); userParams.addAll(request.getParameterMap().keySet()); if (validParamNames != null) { userParams.removeAll(validParamNames); } if (!userParams.isEmpty()) { // User request specifies parameters that are not a subset of the valid parameters. String errorResp = String.format("Unrecognized endpoint parameters in %s %s request: %s.", endPoint, request.getMethod(), userParams.toString()); writeErrorResponse(response, "", errorResp, SC_BAD_REQUEST, wantJSON(request)); return false; } return true; }
for (Thread t : Thread.getAllStackTraces().keySet()) { if (t.getName().startsWith("Rx")) { rxThreadsBefore.add(t); System.out.println("testStartIdempotence >> " + t); for (Thread t : Thread.getAllStackTraces().keySet()) { if (t.getName().startsWith("Rx")) { rxThreadsAfter.add(t); System.out.println("testStartIdempotence >>>> " + t); rxThreadsAfter.removeAll(rxThreadsBefore); Assert.assertTrue("Some new threads appeared: " + rxThreadsAfter, rxThreadsAfter.isEmpty());
/**
 * Refreshes the tracked set of remote worker nodes and pushes the given memory pool
 * assignments to each of them.
 *
 * @param assignments the memory pool assignments to propagate to every tracked node
 */
private synchronized void updateNodes(MemoryPoolAssignmentsRequest assignments) {
    ImmutableSet.Builder<Node> builder = ImmutableSet.builder();
    // Nodes that are shutting down are still treated as alive here, so their memory
    // continues to be tracked until they are actually gone.
    Set<Node> aliveNodes = builder
        .addAll(nodeManager.getNodes(ACTIVE))
        .addAll(nodeManager.getNodes(SHUTTING_DOWN))
        .build();

    ImmutableSet<String> aliveNodeIds = aliveNodes.stream()
        .map(Node::getNodeIdentifier)
        .collect(toImmutableSet());

    // Remove nodes that don't exist anymore
    // Make a copy to materialize the set difference
    Set<String> deadNodes = ImmutableSet.copyOf(difference(nodes.keySet(), aliveNodeIds));
    nodes.keySet().removeAll(deadNodes);

    // Add new nodes
    for (Node node : aliveNodes) {
        if (!nodes.containsKey(node.getNodeIdentifier())) {
            nodes.put(node.getNodeIdentifier(), new RemoteNodeMemory(node, httpClient, memoryInfoCodec, assignmentsRequestJsonCodec, locationFactory.createMemoryInfoLocation(node)));
        }
    }

    // If work isn't scheduled on the coordinator (the current node) there is no point
    // in polling or updating (when moving queries to the reserved pool) its memory pools
    if (!isWorkScheduledOnCoordinator) {
        nodes.remove(nodeManager.getCurrentNode().getNodeIdentifier());
    }

    // Schedule refresh
    for (RemoteNodeMemory node : nodes.values()) {
        node.asyncRefresh(assignments);
    }
}
/**
 * Determines which topologies need their binaries re-downloaded, excluding topologies
 * whose workers are currently starting (downloading now would race with them).
 *
 * @param localAssignment the current port-to-assignment map on this supervisor
 * @return the topology ids to re-download, or {@code null} if none were pending
 */
@SuppressWarnings("unchecked")
private Set<String> getNeedReDownloadTopologies(Map<Integer, LocalAssignment> localAssignment) {
    // Atomically claim the pending set so each request is handled exactly once.
    Set<String> reDownloadTopologies = syncProcesses.getTopologyIdNeedDownload().getAndSet(null);
    if (reDownloadTopologies == null || reDownloadTopologies.isEmpty()) {
        return null;
    }

    // Collect topologies whose workers are in the middle of starting up.
    Set<String> startingTopologies = new HashSet<>();
    Map<Integer, String> workerIdByPort = syncProcesses.getPortToWorkerId();
    for (Entry<Integer, LocalAssignment> assignment : localAssignment.entrySet()) {
        if (workerIdByPort.containsKey(assignment.getKey())) {
            startingTopologies.add(assignment.getValue().getTopologyId());
        }
    }
    LOG.debug("workers are starting on these topologies, delay downloading topology binary: " + startingTopologies);

    reDownloadTopologies.removeAll(startingTopologies);
    if (!reDownloadTopologies.isEmpty()) {
        LOG.info("Following topologies are going to re-download the jars, " + reDownloadTopologies);
    }
    return reDownloadTopologies;
}
/**
 * Finds the entry points of a protected range: the blocks inside the range that have at
 * least one predecessor outside it.
 *
 * @param range the exception range whose protected blocks are examined
 * @return the blocks through which control can enter the protected range
 */
private static Set<BasicBlock> getRangeEntries(ExceptionRangeCFG range) {
    Set<BasicBlock> entries = new HashSet<>();
    // Set view of the range for O(1) membership tests on predecessors.
    Set<BasicBlock> protectedBlocks = new HashSet<>(range.getProtectedRange());

    for (BasicBlock block : range.getProtectedRange()) {
        for (BasicBlock predecessor : block.getPreds()) {
            if (!protectedBlocks.contains(predecessor)) {
                // Some control flow reaches this block from outside the range.
                entries.add(block);
                break;
            }
        }
    }
    return entries;
}
/**
 * Removes every occurrence of the given values. First delegates to the backing
 * collection; if that view rejects bulk removal, falls back to removing the matching
 * entries through the map's key-set view instead.
 */
@Override
public boolean removeAll(Collection<?> c) {
    try {
        // Fast path: the underlying collection may support bulk removal directly.
        return super.removeAll(checkNotNull(c));
    } catch (UnsupportedOperationException e) {
        // Fallback: collect the keys whose mapped values occur in c, then drop them
        // via the key-set view, which reports whether the map changed.
        Set<K> keysToDrop = Sets.newHashSet();
        for (Entry<K, V> mapEntry : map().entrySet()) {
            if (c.contains(mapEntry.getValue())) {
                keysToDrop.add(mapEntry.getKey());
            }
        }
        return map().keySet().removeAll(keysToDrop);
    }
}
/**
 * Derives the feature set of a multimap's {@code get()} view from the features of the
 * multimap itself.
 *
 * @param multimapFeatures the features declared for the multimap under test
 * @return the features the {@code get()} view collection inherits
 */
Set<Feature<?>> computeMultimapGetFeatures(Set<Feature<?>> multimapFeatures) {
    Set<Feature<?>> derivedFeatures = Helpers.copyToSet(multimapFeatures);

    // Translate each multimap-level feature into the view feature(s) it implies.
    for (Entry<Feature<?>, Feature<?>> mapping : GET_FEATURE_MAP.entries()) {
        if (derivedFeatures.contains(mapping.getKey())) {
            derivedFeatures.add(mapping.getValue());
        }
    }

    // The multimap-specific iterator-remove feature maps onto the generic collection one.
    if (derivedFeatures.remove(MultimapFeature.VALUE_COLLECTIONS_SUPPORT_ITERATOR_REMOVE)) {
        derivedFeatures.add(CollectionFeature.SUPPORTS_ITERATOR_REMOVE);
    }

    // A view is only serializable when the multimap serializes its views too.
    if (!derivedFeatures.contains(CollectionFeature.SERIALIZABLE_INCLUDING_VIEWS)) {
        derivedFeatures.remove(CollectionFeature.SERIALIZABLE);
    }

    // Multimap-level keys themselves are meaningless for the view; keep only view features.
    derivedFeatures.removeAll(GET_FEATURE_MAP.keySet());
    return derivedFeatures;
}
/**
 * Computes the providers present in the new manifest but absent from the old one, and
 * warns (or throws, depending on configuration) if any were added, since added
 * providers are not supported by the patch mechanism.
 *
 * @param oldProviders providers declared by the old build
 * @param newProviders providers declared by the new build
 * @return the set of newly added provider names (possibly empty)
 */
private Set<String> getIncrementProviders(Collection<String> oldProviders, Collection<String> newProviders) {
    final Set<String> addedProviders = new HashSet<>(newProviders);
    addedProviders.removeAll(oldProviders);

    if (!addedProviders.isEmpty()) {
        announceWarningOrException("found added providers: " + addedProviders.toString()
            + "\n currently tinker does not support increase new providers, "
            + "such these changes would not take effect.");
    }
    return addedProviders;
}