// Rounds floorId up to the next batchSize boundary (a full batch ahead when already
// on a boundary), never returning less than lowIncluded.
private long findRoofId( long floorId )
{
    int rest = (int) (floorId % batchSize);
    return max( rest == 0 ? floorId + batchSize : floorId + batchSize - rest, lowIncluded );
}

// Rounds roofId down to the previous batchSize boundary (a full batch back when already
// on a boundary), never returning less than lowIncluded.
private long findFloorId( long roofId )
{
    int rest = (int) (roofId % batchSize);
    return max( rest == 0 ? roofId - batchSize : roofId - rest, lowIncluded );
}

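// A minimal standalone sketch of the boundary arithmetic above; BATCH_SIZE = 10 and
// LOW_INCLUDED = 5 are assumed example values, not taken from the source.
class BatchBoundaryDemo
{
    private static final long BATCH_SIZE = 10;
    private static final long LOW_INCLUDED = 5;

    private static long findFloorId( long roofId )
    {
        long rest = roofId % BATCH_SIZE;
        return Math.max( rest == 0 ? roofId - BATCH_SIZE : roofId - rest, LOW_INCLUDED );
    }

    private static long findRoofId( long floorId )
    {
        long rest = floorId % BATCH_SIZE;
        return Math.max( rest == 0 ? floorId + BATCH_SIZE : floorId + BATCH_SIZE - rest, LOW_INCLUDED );
    }

    public static void main( String[] args )
    {
        System.out.println( findFloorId( 27 ) ); // 20: rounded down to the nearest multiple of 10
        System.out.println( findFloorId( 30 ) ); // 20: already on a boundary, so one full batch back
        System.out.println( findFloorId( 7 ) );  // 5: clamped to LOW_INCLUDED
        System.out.println( findRoofId( 27 ) );  // 30: rounded up to the next multiple of 10
        System.out.println( findRoofId( 30 ) );  // 40: already on a boundary, so one full batch ahead
    }
}
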
@Override
public boolean id( long id )
{
    nodeRecord.setId( id );
    highestId = max( highestId, id );
    return true;
}

public static long highestMemoryUsageOf( MemoryStatsVisitor.Visitable... memoryUsers )
{
    long max = 0;
    for ( MemoryStatsVisitor.Visitable visitable : memoryUsers )
    {
        max = max( max, totalMemoryUsageOf( visitable ) );
    }
    return max;
}

private void updatePeakMemoryUsage()
{
    peakMemoryUsage = max( peakMemoryUsage, totalMemoryUsageOf( nodeRelationshipCache, idMapper, neoStore ) );
}

// Hands out id batches in reverse order, walking endId down towards lowIncluded.
@Override
public LongIterator nextBatch()
{
    if ( endId <= lowIncluded )
    {
        return null;
    }
    long startId = findFloorId( endId );
    final LongIterator result = range( startId, endId - 1 /*excluded*/ );
    endId = max( lowIncluded, startId );
    return result;
}

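// A minimal standalone sketch of the reverse batch iteration above, again with assumed
// example values lowIncluded = 5 and batchSize = 10; names and values are illustrative,
// not taken from the source.
class ReverseBatchDemo
{
    private static final long LOW_INCLUDED = 5;
    private static final long BATCH_SIZE = 10;
    private static long endId = 27;

    // Returns {startId, endId (exclusive)} of the next batch, or null when exhausted.
    private static long[] nextBatch()
    {
        if ( endId <= LOW_INCLUDED )
        {
            return null;
        }
        long rest = endId % BATCH_SIZE;
        long startId = Math.max( rest == 0 ? endId - BATCH_SIZE : endId - rest, LOW_INCLUDED );
        long[] batch = {startId, endId};
        endId = Math.max( LOW_INCLUDED, startId );
        return batch;
    }

    public static void main( String[] args )
    {
        for ( long[] b; (b = nextBatch()) != null; )
        {
            System.out.println( b[0] + ".." + (b[1] - 1) ); // prints 20..26, then 10..19, then 5..9
        }
    }
}
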
public StringCollisionValues( NumberArrayFactory factory, long length )
{
    // Let's have length (also chunk size) be divisible by PAGE_SIZE, such that our calculations below
    // work for all NumberArray implementations.
    int remainder = (int) (length % PAGE_SIZE);
    if ( remainder != 0 )
    {
        length += PAGE_SIZE - remainder;
    }
    chunkSize = max( length, PAGE_SIZE );
    cache = factory.newDynamicByteArray( chunkSize, new byte[1] );
    current = cache.at( 0 );
}

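// A minimal standalone sketch of the round-up-to-a-page-multiple step above;
// PAGE_SIZE = 4096 is an assumed example value, not taken from the source.
class RoundUpToPageDemo
{
    private static final int PAGE_SIZE = 4096;

    private static long roundUpToPageMultiple( long length )
    {
        int remainder = (int) (length % PAGE_SIZE);
        if ( remainder != 0 )
        {
            length += PAGE_SIZE - remainder;
        }
        return Math.max( length, PAGE_SIZE );
    }

    public static void main( String[] args )
    {
        System.out.println( roundUpToPageMultiple( 5000 ) ); // 8192
        System.out.println( roundUpToPageMultiple( 4096 ) ); // 4096
        System.out.println( roundUpToPageMultiple( 100 ) );  // 4096
    }
}
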
private static long highestRangeId( Labels[] data )
{
    long highest = 0;
    for ( Labels labels : data )
    {
        Pair<LabelScanKey,LabelScanValue> highestEntry = labels.entries.get( labels.entries.size() - 1 );
        highest = max( highest, highestEntry.first().idRange );
    }
    return highest;
}

/**
 * Optimizes the relationship groups store by physically locating groups for each node together.
 */
public void defragmentRelationshipGroups()
{
    // Defragment relationship groups for better performance
    new RelationshipGroupDefragmenter( config, executionMonitor, RelationshipGroupDefragmenter.Monitor.EMPTY,
            numberArrayFactory )
            .run( max( maxMemory, peakMemoryUsage ), neoStore, neoStore.getNodeStore().getHighId() );
}

@Override
public void addInput(GroupByIdBlock groupIdsBlock, Page page)
{
    Block[] blocks = new Block[page.getChannelCount() + 1];
    for (int i = 0; i < page.getChannelCount(); i++) {
        blocks[i] = page.getBlock(i);
    }
    // Add group id block
    blocks[page.getChannelCount()] = groupIdsBlock;
    groupCount = max(groupCount, groupIdsBlock.getGroupCount());
    pagesIndex.addPage(new Page(blocks));
}

void collect()
{
    totalTimeMillis = currentTimeMillis() - startTime;
    stageVmPauseTime = vmPauseTimeAccumulator.getPauseTime() - baseVmPauseTime;
    long lastDoneBatches = doneBatches;
    for ( Step<?> step : execution.steps() )
    {
        StepStats stats = step.stats();
        Stat memoryUsageStat = stats.stat( Keys.memory_usage );
        if ( memoryUsageStat != null )
        {
            memoryUsage = max( memoryUsage, memoryUsageStat.asLong() );
        }
        Stat ioStat = stats.stat( Keys.io_throughput );
        if ( ioStat != null )
        {
            ioThroughput = ioStat.asLong();
        }
        lastDoneBatches = stats.stat( Keys.done_batches ).asLong();
    }
    doneBatches = lastDoneBatches;
}

this.progress = max( this.progress, progress );

private static <RECORD extends AbstractBaseRecord> void assertSameStoreContents(
        RecordStore<RECORD> store1, RecordStore<RECORD> store2 )
{
    long highId1 = store1.getHighId();
    long highId2 = store2.getHighId();
    long maxHighId = max( highId1, highId2 );
    RECORD record1 = store1.newRecord();
    RECORD record2 = store2.newRecord();
    for ( long id = store1.getNumberOfReservedLowIds(); id < maxHighId; id++ )
    {
        store1.getRecord( id, record1, RecordLoad.CHECK );
        store2.getRecord( id, record2, RecordLoad.CHECK );
        assertEquals( record1, record2 );
    }
}

@Override
public SummaryCollection get(long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException
{
    long nanosLeft = unit.toNanos(timeout);
    long t1, t2;
    CompletableFuture<ProcessedFiles> futureRef = updateFuture();
    t1 = System.nanoTime();
    // Wait at least 1 ns so an exhausted budget surfaces as a TimeoutException from get()
    // rather than a non-positive timeout being passed along.
    ProcessedFiles processedFiles = futureRef.get(Long.max(1, nanosLeft), TimeUnit.NANOSECONDS);
    t2 = System.nanoTime();
    nanosLeft -= (t2 - t1);
    // Retry while there are failed files, charging each attempt against the remaining budget.
    while (!processedFiles.failedFiles.isEmpty()) {
        futureRef = updateFuture();
        t1 = System.nanoTime();
        processedFiles = futureRef.get(Long.max(1, nanosLeft), TimeUnit.NANOSECONDS);
        t2 = System.nanoTime();
        nanosLeft -= (t2 - t1);
    }
    return processedFiles.summaries;
}

@Override
public Generator<DynamicRecord> dynamic()
{
    return ( recordSize, format, recordId ) ->
    {
        int dataSize = recordSize - format.getRecordHeaderSize();
        int length = random.nextBoolean() ? dataSize : random.nextInt( dataSize );
        long next = length == dataSize ? randomLong( propertyBits ) : nullValue;
        DynamicRecord record = new DynamicRecord( max( 1, recordId ) ).initialize( random.nextBoolean(),
                random.nextBoolean(), next, random.nextInt( PropertyType.values().length ), length );
        byte[] bytes = random.nextByteArray( record.getLength(), record.getLength() ).asObjectCopy();
        record.setData( bytes );
        return record;
    };
}

private synchronized long getMax(Setting setting)
{
    Long pendingValue = pending.get(setting);
    long currentValue = current.get(setting).longValue();
    if (pendingValue == null) {
        return currentValue;
    }
    return Long.max(pendingValue.longValue(), currentValue);
}

@Override
public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException
{
    if (execute(sql, columnIndexes)) {
        throw NO_RESULT_COUNT_AVAILABLE;
    }
    return max(getLargeUpdateCount(), 0);
}

@Override
public LiveInstances applyTo(LiveInstances state, Revision newRevision)
{
    Map<String, Long> timestamps = new HashMap<>(state.liveInstances);
    // Vector time is the max of every known instance timestamp and this update's timestamp;
    // assumes at least one live instance is already recorded.
    long vectorTime = Long.max(timestamps.values().stream().max(Long::compare).get(), timestamp);
    timestamps.put(name, timestamp);
    return new LiveInstances(state.scopedStreamName, newRevision, Collections.unmodifiableMap(timestamps), vectorTime);
}