// Creates a parallel stream over the delegate while holding the mutex.
// NOTE(review): the lock covers only stream *creation* — traversal of the
// returned stream is NOT synchronized, so the caller must hold the mutex
// while consuming it (standard caveat of synchronized collection wrappers).
@Override public Stream<E> parallelStream() { synchronized (mutex) { return delegate().parallelStream(); } }
/**
 * Builds a stream over the backing collection.
 *
 * @param parallel whether the returned stream should be parallel
 * @return a parallel or sequential stream of the collection's elements
 */
@Override
public Stream<T> build(boolean parallel) {
    if (parallel) {
        return collection.parallelStream();
    }
    return collection.stream();
}
}
// Parallel-stream view of the delegate, created under the mutex.
// NOTE(review): only creation is locked; the stream itself performs
// unsynchronized traversal — consumers must synchronize on the mutex.
@Override public Stream<E> parallelStream() { synchronized (mutex) { return delegate().parallelStream(); } }
List<String> destList = new ArrayList<>(Arrays.asList("foo"));
List<String> newList = Arrays.asList("0", "1", "2", "3", "4", "5");
// BUG FIX: the original used Collectors.toCollection(() -> destList) on a
// parallel stream. That supplier is invoked once per sub-task, and every
// sub-task receives the SAME unsynchronized ArrayList, which the combiner
// then addAll()s into itself — yielding duplicated/corrupted contents.
// Collect into a fresh list first, then append to the destination.
destList.addAll(newList.parallelStream().collect(Collectors.toList()));
System.out.println(destList);
/**
 * Sums the positive available CPU across all fragmented supervisors,
 * truncated to a whole number.
 *
 * @return total available CPU on fragmented nodes, as an int
 */
private int fragmentedCpu() {
    // Keep the sum as a primitive: the original boxed it into a Double only
    // to immediately call intValue(), which is just a narrowing cast.
    double total = nodeIdToResources.get().values().parallelStream()
            .filter(this::isFragmented)
            .mapToDouble(SupervisorResources::getAvailableCpu)
            .filter(x -> x > 0)
            .sum();
    return (int) total;
}
/**
 * Sums the positive available memory across all fragmented supervisors.
 *
 * @return total available memory on fragmented nodes
 */
private double fragmentedMemory() {
    // BUG FIX: the method is declared to return double, yet the original
    // returned res.intValue(), silently truncating the fractional part
    // (an apparent copy-paste from the int-returning fragmentedCpu()).
    // Return the primitive sum directly.
    return nodeIdToResources.get().values().parallelStream()
            .filter(this::isFragmented)
            .mapToDouble(SupervisorResources::getAvailableMem)
            .filter(x -> x > 0)
            .sum();
}
/**
 * Destroys every sender group concurrently, then drops all references.
 */
@Override
public void destroy() {
    // Tear the groups down in parallel before clearing the registry.
    groupSenders.values().parallelStream().forEach(group -> group.destroy());
    groupSenders.clear();
}
}
/**
 * Sweeps expired sessions. Any failure is logged so the cleanup
 * thread keeps running on subsequent invocations.
 */
@Override
public void run() {
    try {
        sessionManager.sessionMap().values().parallelStream()
                .filter(session -> expires(session))
                .forEach(session -> sessionManager.destorySession(session));
    } catch (Exception e) {
        log.error("Session clean error", e);
    }
}
/**
 * Periodic cleanup task: finds sessions that have expired and destroys
 * them; exceptions are caught and logged so the scheduler is never killed.
 */
@Override
public void run() {
    try {
        Collection<Session> activeSessions = sessionManager.sessionMap().values();
        activeSessions.parallelStream()
                .filter(session -> expires(session))
                .forEach(session -> sessionManager.destorySession(session));
    } catch (Exception e) {
        log.error("Session clean error", e);
    }
}
public static final int LIMIT = 500_000_000;
public static final long VALUE = 3L;

/**
 * Sums {@code VALUE % 73 % 13} over {@code LIMIT} elements using a
 * primitive parallel LongStream (no boxing).
 */
public long range() {
    return LongStream.range(0, LIMIT)
            .parallel()
            .map(ignored -> VALUE)
            .map(value -> value % 73 % 13)
            .sum();
}

/**
 * Computes the same aggregate from a boxed {@code nCopies} view,
 * forcing an unboxing step via {@code mapToLong}.
 */
public long ncopies() {
    return Collections.nCopies(LIMIT, VALUE)
            .parallelStream()
            .mapToLong(boxed -> boxed)
            .map(value -> value % 73 % 13)
            .sum();
}
// Returns the delegate's parallel stream, acquiring the mutex for the
// duration of the call. NOTE(review): as with all synchronized-wrapper
// streams, the returned stream is not itself thread-safe; callers should
// synchronize on the mutex while iterating.
@Override public Stream<E> parallelStream() { synchronized (mutex) { return delegate().parallelStream(); } }
/**
 * Scans the given package names.
 *
 * @param annotation The annotation name to scan
 * @param packages The package names
 * @return A stream of classes
 */
default Stream<Class> scan(String annotation, Collection<String> packages) {
    final Stream<String> packageNames = packages.parallelStream();
    return scan(annotation, packageNames);
}
/**
 * Scans the given package names.
 *
 * @param annotation The annotation name to scan
 * @param packages The package names
 * @return A stream of classes
 */
default Stream<Class> scan(Class<? extends Annotation> annotation, Collection<String> packages) {
    final String annotationName = annotation.getName();
    return scan(annotationName, packages.parallelStream());
}
/**
 * Collection wrapper that picks a stream implementation based on the host
 * OS, the number of available cores, and the size of the delegate.
 */
public abstract class MyOptimizedCollection<E> implements Collection<E> {

    private enum OperatingSystem {
        LINUX, WINDOWS, ANDROID
    }

    private OperatingSystem operatingSystem = OperatingSystem.WINDOWS;
    private int numberOfCores = Runtime.getRuntime().availableProcessors();
    private Collection<E> delegate;

    /**
     * Returns a parallel stream only when it is likely to pay off;
     * otherwise falls back to a sequential stream of the delegate.
     */
    @Override
    public Stream<E> parallelStream() {
        // BUG FIX: the original called .equals("true") on the property value,
        // which throws NullPointerException whenever "parallelSupport" is not
        // set. The literal-first comparison is null-safe and treats an absent
        // property the same as "disabled".
        if (!"true".equals(System.getProperty("parallelSupport"))) {
            return this.delegate.stream();
        }
        switch (operatingSystem) {
            case WINDOWS:
                // Parallelism only helps with enough cores and enough data.
                if (numberOfCores > 3 && delegate.size() > 10000) {
                    return this.delegate.parallelStream();
                }
                return this.delegate.stream();
            case LINUX:
                return SomeVerySpecialStreamImplementation.stream(this.delegate.spliterator());
            case ANDROID:
            default:
                return this.delegate.stream();
        }
    }
}
Objects.requireNonNull(predicateFunction, "root predicate function can not be null");
// Go parallel only for large inputs; small lists stay sequential.
Supplier<Stream<N>> streamSupplier =
        () -> dataList.size() >= 1000 ? dataList.parallelStream() : dataList.stream();
// Demo: collecting a parallel stream preserves the source's encounter order,
// so the printed list matches the collection's order.
Stream<String> parallelStream = collection.parallelStream(); System.out.println("parallelStream = " + parallelStream.collect(Collectors.toList())); // print parallelStream = [a1, a2, a3]
/**
 * Re-validates every segment in parallel against the recorded offsets.
 * A segment whose recorded offset matches its wrote position is trusted;
 * otherwise (missing or stale) it is re-validated from disk.
 */
void reValidate(final Map<Long, Long> offsets, int singleMessageLimitSize) {
    segments.values().parallelStream().forEach(segment -> {
        final ScheduleSetSegment scheduleSegment = (ScheduleSetSegment) segment;
        final Long recorded = offsets.get(segment.getSegmentBaseOffset());
        final long wrotePosition = segment.getWrotePosition();
        final long offset;
        if (recorded != null && recorded == wrotePosition) {
            offset = wrotePosition;
        } else {
            offset = doValidate(scheduleSegment, singleMessageLimitSize);
        }
        scheduleSegment.loadOffset(offset);
    });
}
// NOTE(review): this snippet is truncated mid-expression — both
// getContentLocation(...) calls are cut off and the two forEach bodies are
// interleaved, so the fragment cannot be reviewed or rewritten in isolation.
// Also note: side-effecting parallel forEach over transform inputs assumes
// the output provider is thread-safe — TODO confirm against the full source.
input.getJarInputs().parallelStream().forEach(jarInput -> { File src = jarInput.getFile(); File dst = invocation.getOutputProvider().getContentLocation( input.getDirectoryInputs().parallelStream().forEach(directoryInput -> { File src = directoryInput.getFile(); File dst = invocation.getOutputProvider().getContentLocation(
// Pre-computes walk distances from each transit stop, iterating the stop
// vertices in parallel. NOTE(review): the constructor body is truncated here
// (the RoutingRequest is still being configured when the snippet ends), so
// only the visible portion can be documented.
public StopTreeCache (Graph graph, int maxWalkMeters) { this.maxWalkMeters = maxWalkMeters; LOG.info("Caching distances to nearby street intersections from each transit stop..."); graph.index.stopVertexForStop.values().parallelStream().forEach(tstop -> { RoutingRequest rr = new RoutingRequest(TraverseMode.WALK); rr.batch = (true);
// Reports classes referenced by HTTP client beans: every class listed via
// @TypeHint, plus the non-java.lang return types and the argument types of
// each @HttpMethodMapping-annotated executable method.
// The (Consumer<ExecutableMethod<?, ?>>) cast pins lambda parameter inference
// against the wildcard element type of getExecutableMethods(); parallelStream
// here presumes ClassLoadingReporter is thread-safe — TODO confirm.
private void processHttpMethods(Collection<BeanDefinition<?>> clientBeans) { for (BeanDefinition<?> clientBean : clientBeans) { final Optional<Class[]> additionalTypes = clientBean.getValue(TypeHint.class, Class[].class); additionalTypes.ifPresent(classes -> { for (Class aClass : classes) { ClassLoadingReporter.reportBeanPresent(aClass); } }); final Collection<? extends ExecutableMethod<?, ?>> executableMethods = clientBean.getExecutableMethods(); executableMethods.parallelStream().forEach((Consumer<ExecutableMethod<?, ?>>) executableMethod -> { if (executableMethod.hasStereotype(HttpMethodMapping.class)) { final ReturnType<?> returnType = executableMethod.getReturnType(); final Class<?> javaType = returnType.getType(); if (!ClassUtils.isJavaLangType(javaType)) { ClassLoadingReporter.reportBeanPresent(javaType); } reportArguments(returnType.getTypeParameters()); reportArguments(executableMethod.getArguments()); } }); } }