MapMaker setKeyStrength(Strength strength) { checkState(keyStrength == null, "Key strength was already set to %s", keyStrength); keyStrength = checkNotNull(strength); if (strength != Strength.STRONG) { // STRONG could be used during deserialization. useCustomMap = true; } return this; }
/**
 * Looks up {@code variableName} in {@code map}, failing fast when the name is
 * absent or maps to {@code null}.
 *
 * @throws IllegalStateException if the name is null or no non-null value is mapped
 */
private static <T> T getValue(Map<String, T> map, String variableName) {
  checkState(variableName != null, "variableName is null");
  T result = map.get(variableName);
  checkState(result != null, "value for variable '%s' is null", variableName);
  return result;
}
/**
 * Sets the minimum total size for the internal hash tables. For example, if the initial capacity
 * is {@code 60}, and the concurrency level is {@code 8}, then eight segments are created, each
 * having a hash table of size eight. Providing a large enough estimate at construction time
 * avoids the need for expensive resizing operations later, but setting this value unnecessarily
 * high wastes memory.
 *
 * @return this {@code CacheBuilder} instance (for chaining)
 * @throws IllegalArgumentException if {@code initialCapacity} is negative
 * @throws IllegalStateException if an initial capacity was already set
 */
public CacheBuilder<K, V> initialCapacity(int initialCapacity) {
  // UNSET_INT marks "never configured"; a second call is a caller bug.
  checkState(
      this.initialCapacity == UNSET_INT,
      "initial capacity was already set to %s",
      this.initialCapacity);
  checkArgument(initialCapacity >= 0);
  this.initialCapacity = initialCapacity;
  return this;
}
/**
 * Builds an OR predicate over the given operand documents.
 * A single operand is returned as-is rather than wrapped.
 *
 * @throws IllegalStateException if {@code values} is empty
 */
private static Document orPredicate(List<Document> values) {
  checkState(!values.isEmpty());
  return values.size() == 1 ? values.get(0) : new Document(OR_OP, values);
}
/**
 * Hands out the next unconsumed exchange source.
 *
 * @throws IllegalStateException once every source has been handed out
 */
public synchronized LocalExchangeSource getNextSource() {
  checkState(nextSourceIndex < sources.size(), "All operators already created");
  // Post-increment advances the cursor to the following source.
  return sources.get(nextSourceIndex++);
}
/**
 * Registers an assignment for {@code symbol}. Re-registering the same symbol is
 * allowed only when the expression is equal to the one already recorded.
 *
 * @return this builder (for chaining)
 * @throws IllegalStateException on a conflicting re-assignment
 */
public Builder put(Symbol symbol, Expression expression) {
  if (assignments.containsKey(symbol)) {
    Expression existing = assignments.get(symbol);
    checkState(
        existing.equals(expression),
        "Symbol %s already has assignment %s, while adding %s",
        symbol,
        existing,
        expression);
  }
  assignments.put(symbol, expression);
  return this;
}
/**
 * Associates {@code developerId} with {@code developer}, refusing to overwrite
 * an id that was already registered.
 *
 * @return this repository (for chaining)
 * @throws IllegalStateException if the developer already has an id
 */
@Override
public DbIdsRepository setDeveloperId(Developer developer, long developerId) {
  Long previousId = developerIdsByKey.get(developer);
  checkState(previousId == null,
      "Id '%s' is already registered in repository for Developer '%s', can not set new id '%s'",
      previousId, developer, developerId);
  developerIdsByKey.put(developer, developerId);
  return this;
}
/**
 * Registers {@code col} with the next free column index; a column that is
 * already registered is ignored. Keeps the index map, the ordered column list,
 * and the cached count in sync.
 */
protected final void initAddColumn(TblColRef col) {
  if (columnIndexMap.containsKey(col)) {
    return;
  }
  int nextIndex = columnIndexMap.size();
  columnIndexMap.put(col, nextIndex);
  columnList.add(col);
  columnCount = columnIndexMap.size();
  // Invariant: map and list always describe the same set of columns.
  Preconditions.checkState(columnIndexMap.size() == columnList.size());
}
/**
 * Extracts the values of the ORDER BY columns from the current row of the query result.
 *
 * @return order values for the current row, in ORDER BY item order (entries may be null)
 * @throws SQLException if reading a value from the query result fails
 * @throws IllegalStateException if a non-null order value does not implement {@link Comparable}
 */
private List<Comparable<?>> getOrderValues() throws SQLException {
    List<Comparable<?>> result = new ArrayList<>(orderByItems.size());
    for (OrderItem each : orderByItems) {
        Object value = queryResult.getValue(each.getIndex(), Object.class);
        // Fixed grammar in the error message ("must implements" -> "must implement").
        Preconditions.checkState(null == value || value instanceof Comparable, "Order by value must implement Comparable");
        result.add((Comparable<?>) value);
    }
    return result;
}
/**
 * Builds a Kafka-backed streams data source for the given table URI.
 * The topic is taken from the URI host, and {@code bootstrap-servers} must be
 * supplied as a URI query parameter.
 *
 * @throws IllegalStateException if no field is marked as the primary key
 * @throws NullPointerException if bootstrap servers are not specified
 */
@Override
public ISqlStreamsDataSource constructStreams(URI uri, String inputFormatClass, String outputFormatClass,
        Properties properties, List<FieldInfo> fields) {
    // Collect field names and locate the primary-key column.
    List<String> fieldNames = new ArrayList<>();
    int primaryIndex = -1;
    for (int i = 0; i < fields.size(); ++i) {
        FieldInfo field = fields.get(i);
        fieldNames.add(field.name());
        if (field.isPrimary()) {
            primaryIndex = i;
        }
    }
    Preconditions.checkState(primaryIndex != -1, "Kafka stream table must have a primary key");

    Scheme scheme = SerdeUtils.getScheme(inputFormatClass, properties, fieldNames);
    Map<String, String> uriParams = parseUriParams(uri.getQuery());
    String bootstrapServers = uriParams.get(URI_PARAMS_BOOTSTRAP_SERVERS);
    Preconditions.checkNotNull(bootstrapServers, "bootstrap-servers must be specified");
    String topic = uri.getHost();

    // Consumer reads raw ByteBuffers; a random group id keeps each run independent.
    KafkaSpoutConfig<ByteBuffer, ByteBuffer> kafkaSpoutConfig =
            new KafkaSpoutConfig.Builder<ByteBuffer, ByteBuffer>(bootstrapServers, topic)
                    .setProp(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteBufferDeserializer.class)
                    .setProp(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteBufferDeserializer.class)
                    .setProp(ConsumerConfig.GROUP_ID_CONFIG, "storm-sql-kafka-" + UUID.randomUUID().toString())
                    .setRecordTranslator(new RecordTranslatorSchemeAdapter(scheme))
                    .build();
    IOutputSerializer serializer = SerdeUtils.getSerializer(outputFormatClass, properties, fieldNames);
    return new KafkaStreamsDataSource(kafkaSpoutConfig, bootstrapServers, topic, properties, serializer);
}
@Override public BytecodeNode generateExpression(Signature signature, BytecodeGeneratorContext context, Type returnType, List<RowExpression> arguments, Optional<Variable> outputBlockVariable) { // Bind expression is used to generate captured lambda. // It takes the captured values and the uncaptured lambda, and produces captured lambda as the output. // The uncaptured lambda is just a method, and does not have a stack representation during execution. // As a result, the bind expression generates the captured lambda in one step. // outputBlockVariable cannot present because // 1. bind cannot be in the top level of an expression // 2. lambda cannot be put into blocks. checkArgument(!outputBlockVariable.isPresent()); int numCaptures = arguments.size() - 1; LambdaDefinitionExpression lambda = (LambdaDefinitionExpression) arguments.get(numCaptures); checkState(compiledLambdaMap.containsKey(lambda), "lambda expressions map does not contain this lambda definition"); CompiledLambda compiledLambda = compiledLambdaMap.get(lambda); return LambdaBytecodeGenerator.generateLambda( context, arguments.subList(0, numCaptures), compiledLambda, lambdaInterface); } }
/**
 * Converts a Presto {@code Page} into its Thrift wire representation.
 * A null page means end-of-data and must not carry a continuation token.
 *
 * @throws IllegalStateException on a token with a null page, or on a
 *         column-count mismatch between the page and the requested types
 */
private static PrestoThriftPageResult toThriftPage(Page page, List<Type> columnTypes, @Nullable PrestoThriftId nextToken)
{
    if (page == null) {
        checkState(nextToken == null, "there must be no more data when page is null");
        return new PrestoThriftPageResult(ImmutableList.of(), 0, null);
    }
    checkState(page.getChannelCount() == columnTypes.size(), "number of columns in a page doesn't match the one in requested types");

    int columnCount = columnTypes.size();
    List<PrestoThriftBlock> blocks = new ArrayList<>(columnCount);
    for (int channel = 0; channel < columnCount; channel++) {
        blocks.add(fromBlock(page.getBlock(channel), columnTypes.get(channel)));
    }
    return new PrestoThriftPageResult(blocks, page.getPositionCount(), nextToken);
}
/**
 * Returns the sample ratio recorded during analysis for the given relation.
 *
 * @throws IllegalStateException if analysis recorded no ratio for it
 */
public double getSampleRatio(SampledRelation relation) {
  NodeRef<SampledRelation> ref = NodeRef.of(relation);
  checkState(sampleRatios.containsKey(ref), "Sample ratio missing for %s. Broken analysis?", relation);
  return sampleRatios.get(ref);
}
/**
 * Returns the current row's value for the given column label.
 *
 * @throws IllegalStateException if the label is unknown
 */
@Override
public Object getValue(final String columnLabel, final Class<?> type) {
    Preconditions.checkState(labelAndIndexMap.containsKey(columnLabel), "Can't find columnLabel: %s", columnLabel);
    // The map holds 1-based JDBC column indexes; currentRow is 0-based.
    int columnIndex = labelAndIndexMap.get(columnLabel);
    return currentRow.get(columnIndex - 1);
}
/**
 * Returns the next batch of segments and re-enqueues the corresponding data
 * source so iteration can continue.
 *
 * @throws NoSuchElementException when the queue is exhausted
 * @throws IllegalStateException if a dequeued entry carries no segments
 */
@Override
public List<DataSegment> next() {
    if (!hasNext()) {
        throw new NoSuchElementException();
    }
    final QueueEntry entry = queue.poll();
    // hasNext() can race with poll(); treat a drained queue as exhaustion too.
    if (entry == null) {
        throw new NoSuchElementException();
    }
    final List<DataSegment> segments = entry.segments;
    Preconditions.checkState(!segments.isEmpty(), "Queue entry must not be empty");
    // All segments in one entry share a data source; refresh its queue state.
    final String dataSource = segments.get(0).getDataSource();
    updateQueue(dataSource, compactionConfigs.get(dataSource));
    return segments;
}
/**
 * Registers {@code originalFile} as the pre-move identity of {@code file}.
 * Registering the same original file twice is a no-op; a conflicting
 * registration fails.
 *
 * @throws NullPointerException if either argument is null
 * @throws IllegalArgumentException if {@code file} is not of type FILE
 * @throws IllegalStateException on a conflicting re-registration
 */
@Override
public void setOriginalFile(Component file, OriginalFile originalFile) {
  requireNonNull(file, "file can't be null");
  requireNonNull(originalFile, "originalFile can't be null");
  checkArgument(file.getType() == Component.Type.FILE, "file must be of type FILE");

  OriginalFile previous = originalFiles.get(file.getDbKey());
  checkState(previous == null || previous.equals(originalFile),
      "Original file %s already registered for file %s. Unable to register %s.",
      previous, file, originalFile);
  if (previous == null) {
    originalFiles.put(file.getDbKey(), originalFile);
  }
}
/**
 * Reads the current chunk's block for {@code columnIndex}.
 *
 * @throws IllegalArgumentException if the column was not requested for reading
 * @throws IllegalStateException if the current chunk is exhausted
 */
public Block readBlock(int columnIndex) throws IOException {
    checkArgument(readColumns.containsKey(columnIndex), "Column %s is not being read", columnIndex);
    checkState(currentChunkRowCount > 0, "No more data");

    if (columnIndex < columns.length) {
        return columns[columnIndex].readBlock(rowGroupPosition, currentChunkRowCount);
    }
    // Requested column has no backing reader: synthesize an all-null RLE block.
    Type type = readColumns.get(columnIndex);
    Block nullBlock = type.createBlockBuilder(null, 1, 0).appendNull().build();
    return new RunLengthEncodedBlock(nullBlock, currentChunkRowCount);
}
/**
 * Creates a new locked inode path, using a prefix locked inode path as a starting point.
 *
 * @param uri the uri for the new path
 * @param path the path to use as a starting point
 * @param pathComponents components of the uri
 * @param lockPattern the pattern to lock in
 */
private LockedInodePath(AlluxioURI uri, LockedInodePath path, String[] pathComponents,
    LockPattern lockPattern) {
  // The prefix path must already have resolved inodes and held locks.
  Preconditions.checkState(!path.mExistingInodes.isEmpty());
  Preconditions.checkState(!path.mLockList.isEmpty());
  mUri = uri;
  mPathComponents = pathComponents;
  mLockPattern = lockPattern;
  mInodeStore = path.mInodeStore;
  // Copy the prefix's inode list; wrap its lock list rather than sharing it directly.
  mExistingInodes = new ArrayList<>(path.mExistingInodes);
  mLockList = new CompositeInodeLockList(path.mLockList);
  // Must follow the mExistingInodes assignment above.
  mRoot = mExistingInodes.get(0);
}
/**
 * Single-partition convenience overload: delegates to partition 0.
 *
 * @throws IllegalStateException if more than one partition exists
 */
@Override
public void enqueue(List<SerializedPage> pages) {
    checkState(partitions.size() == 1, "Expected exactly one partition");
    enqueue(0, pages);
}
/**
 * Partitions {@code page}, spilling the positions selected by
 * {@code spillPartitionMask} and returning the rest.
 *
 * @throws IllegalArgumentException on a channel-count mismatch with the expected types
 * @throws IllegalStateException once reading has started
 */
@Override
public synchronized PartitioningSpillResult partitionAndSpill(Page page, IntPredicate spillPartitionMask) {
    requireNonNull(page, "page is null");
    requireNonNull(spillPartitionMask, "spillPartitionMask is null");
    checkArgument(page.getChannelCount() == types.size(), "Wrong page channel count, expected %s but got %s", types.size(), page.getChannelCount());
    checkState(!readingStarted, "reading already started");

    IntArrayList unspilled = partitionPage(page, spillPartitionMask);
    // Kick off asynchronous spilling of any full partition builders.
    ListenableFuture<?> spillFuture = flushFullBuilders();
    Page unspilledPage = page.getPositions(unspilled.elements(), 0, unspilled.size());
    return new PartitioningSpillResult(spillFuture, unspilledPage);
}